from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Map a pixel-space height/width to the latent grid size, rounding up to a multiple of `scale_factor`."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
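# Worked example: downscale_height_and_width(768, 768, 8) returns (96, 96),
# since 768 // 8**2 == 12 with no remainder and 12 * 8 == 96.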

def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a (1, 3, h, w) float tensor normalized to [-1, 1]."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image

class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Image-to-image generation pipeline for Kandinsky 2.2.

    Args:
        unet ([`UNet2DConditionModel`]): Conditional U-Net to denoise the image latents.
        scheduler ([`DDPMScheduler`]): Scheduler used together with `unet` to generate latents.
        movq ([`VQModel`]): MoVQ decoder that turns latents into images.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # Skip the first (1 - strength) fraction of the schedule, as in standard img2img.
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via accelerate, significantly reducing memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload models to CPU, moving each to GPU only when its forward method is called."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Return the device the pipeline's models execute on, accounting for accelerate hooks."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps_tensor[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)

import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with an optional input projection, as used in Transformer-XL."""

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
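    # Shape sketch for _compute_logit (illustrative values): with `hidden` of shape
    # (N, d_proj) and `proj` of shape (d_proj, d_emb), the optional projection maps
    # hidden to (N, d_emb) before the (vocab_chunk, d_emb) weight produces
    # (N, vocab_chunk) logits.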

    def forward(self, hidden, labels=None, keep_order=False):
        """Return log-probabilities if `labels` is None, else per-token negative log-likelihoods."""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out

    def log_prob(self, hidden):
        """Compute log probabilities for all `n_token` tokens."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
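
# Minimal usage sketch (hypothetical sizes, shown for illustration only):
#     softmax = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=512, d_proj=512, cutoffs=[1000, 5000])
#     hidden = torch.randn(4, 16, 512)           # (batch, seq_len, d_proj)
#     labels = torch.randint(0, 10000, (4, 16))
#     nll = softmax(hidden, labels)              # per-token negative log-likelihoods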
"""simple docstring"""
import argparse
import os
import re
A_ : List[str] ="""src/transformers"""
# Pattern that looks at the indentation in a line.
A_ : str =re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
A_ : str =re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A_ : List[Any] =re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
A_ : Optional[Any] =re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A_ : List[str] =re.compile(R"""\[([^\]]+)\]""")

def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]

def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks

def ignore_underscore(key):
    """Wraps a `key` function so that comparisons ignore case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner

def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a string."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
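
# For example, sort_objects(["my_func", "MyClass", "MY_CONSTANT", "OtherClass"])
# returns ["MY_CONSTANT", "MyClass", "OtherClass", "my_func"]: constants first,
# then classes, then functions, each group sorted ignoring case and underscores.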

def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with its objects sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement

def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))

def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the Transformers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
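# Usage sketch (the file path is an assumption; in the Transformers repo this
# utility lives under the utils/ directory):
#     python utils/custom_init_isort.py --check_only   # fail if any __init__ would change
#     python utils/custom_init_isort.py                # rewrite the inits in place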
"""simple docstring"""
from __future__ import annotations
A_ : List[Any] =list[tuple[int, int]]
A_ : Tuple =[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A_ : List[str] =([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right

class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from the current position to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost

class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes reachable from `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from `node` back to the start through parent pointers."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path

if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)

def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two, using the `n & (n - 1)` bit trick."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
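# Example behaviour of the bit trick above:
#     is_power_of_two(8)  -> True    (0b1000 & 0b0111 == 0)
#     is_power_of_two(6)  -> False   (0b110 & 0b101 == 0b100)
#     is_power_of_two(0)  -> True    (0 & -1 == 0, so 0 is treated as a power of two here)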

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}

class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a BridgeTower model."""

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)

class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration for the text encoder of a BridgeTower model."""

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)

class BridgeTowerConfig(PretrainedConfig):
    """Top-level configuration combining the text and vision sub-configurations."""

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
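
# Minimal usage sketch (relies only on the defaults defined above):
#     config = BridgeTowerConfig()
#     config.text_config.vocab_size      # 50265
#     config.vision_config.image_size    # 288
#     combined = BridgeTowerConfig.from_text_vision_configs(config.text_config, config.vision_config)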
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )

class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
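
# Quick usage sketch (values follow the defaults in the signature above):
#     config = WhisperConfig()
#     config.vocab_size             # 51865
#     config.max_source_positions   # 1500
#     onnx_config = WhisperOnnxConfig(config)  # construction mirrors other OnnxConfig subclasses; an assumption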
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt

@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on

class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Fast mBART tokenizer (backed by HuggingFace tokenizers), with language-code handling."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target-language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
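# Editor's sketch of the layout the template methods above produce for an
# MBart-style tokenizer. Token ids below are made up for illustration; only the
# ordering (no prefix, trailing [eos, lang_code]) comes from the code itself:
#
#   set_src_lang_special_tokens('en_XX')  ->  prefix_tokens = [], suffix_tokens = [</s>, en_XX]
#   build_inputs_with_special_tokens([17, 42])   ->  [17, 42, </s>, en_XX]
#   build_inputs_with_special_tokens([17], [42]) ->  [17, 42, </s>, en_XX]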
| 71 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class __A ( TestCase ):
"""simple docstring"""
    def setUp ( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        bart_tokenizer_path = os.path.join(self.tmpdirname , 'bart_tokenizer' )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_dpr_tokenizer ( self ):
        """simple docstring"""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
    def get_bart_tokenizer ( self ):
        """simple docstring"""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
    def tearDown ( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config ( self ):
        """simple docstring"""
        save_dir = os.path.join(self.tmpdirname , 'rag_tokenizer' )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer ( self ):
        """simple docstring"""
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer ( self ):
        """simple docstring"""
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 71 | 1 |
"""simple docstring"""
from __future__ import annotations
def extended_euclid( a : int , b : int ):
    """simple docstring"""
    if b == 0:
        return (1, 0)
    ((x) , (y)) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem( n1 : int , r1 : int , n2 : int , r2 : int ):
    """simple docstring"""
    ((x) , (y)) = extended_euclid(n1 , n2 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo( a : int , n : int ):
    """simple docstring"""
    ((b) , (x)) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2( n1 : int , r1 : int , n2 : int , r2 : int ):
    """simple docstring"""
    x , y = invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 371 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate( initial_vectors : list[numpy.ndarray] , steps : int ):
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step( vectors : list[numpy.ndarray] ):
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate( vector : numpy.ndarray , angle_in_degrees : float ):
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot( vectors : list[numpy.ndarray] ):
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
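# Editor's note (illustrative): each iteration replaces every segment with four,
# so the initial 3 segments become 3 * 4**5 = 3072 segments after the 5 steps
# requested above. The rotation helper behaves as expected for right angles, e.g.
#   rotate(numpy.array([1, 0]), 90)  ->  approximately [0, 1]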
| 308 | 0 |
import cmath
import math
def apparent_power( voltage , current , voltage_angle , current_angle ):
    # Convert angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle )
    current_angle_rad = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle_rad )
    current_rect = cmath.rect(current , current_angle_rad )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
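# Editor's worked example: magnitudes multiply and phase angles add, so a
# 100 V source at +30 degrees driving 5 A at -30 degrees gives
#   apparent_power(100, 5, 30, -30)  ->  (500+0j)   (up to float rounding)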
| 149 |
def actual_power( a , b ):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a , b ):
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
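# Editor's sketch: actual_power above recomputes the half-power twice per call,
# so the call tree is linear in b. Hoisting the shared subcall restores the
# O(log b) divide-and-conquer behaviour (fast_power is a name introduced here):
def fast_power(a, b):
    if b == 0:
        return 1
    half = fast_power(a, int(b / 2))  # int() truncates toward zero, so negative b also reaches 0
    return half * half if (b % 2) == 0 else a * half * half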
| 149 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig ( PretrainedConfig ):
    model_type = '''xmod'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class XmodOnnxConfig ( OnnxConfig ):
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
            ] )
| 116 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4 as uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuida().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent( user_agent = None ) -> str:
    '''simple docstring'''
    ua = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
return ua
def get_full_repo_name( model_id , organization = None , token = None ):
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['''name''']
        return F"""{username}/{model_id}"""
    else:
        return F"""{organization}/{model_id}"""
def create_model_card( args , model_name ):
    '''simple docstring'''
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''' )
    if hasattr(args , '''local_rank''' ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , '''hub_token''' ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , '''gradient_accumulation_steps''' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , '''adam_beta1''' ) else None , adam_beta2=args.adam_beta2 if hasattr(args , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(args , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , '''README.md''' )
    model_card.save(card_path )
def extract_commit_hash( resolved_file , commit_hash = None ):
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r'''snapshots/([^/]+)/''' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
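# Illustrative check (editor's note; the path is made up, and the 40-hex-char
# rule comes from huggingface_hub's REGEX_COMMIT_HASH):
#   extract_commit_hash("models--x/snapshots/" + "a" * 40 + "/unet/config.json")  ->  "a" * 40
# while a path without a snapshots/<hash>/ segment returns None.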
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache( old_cache_dir = None , new_cache_dir = None ) -> None:
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _add_variant( weights_name , variant = None ) -> str:
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )
    return weights_name
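# e.g. (editor's illustration): the variant is spliced in before the extension:
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin")         -> "diffusion_pytorch_model.bin"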
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
'''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __magic_name__ , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__magic_name__ , __magic_name__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__magic_name__ , __magic_name__ )}' so that the correct variant file can be added.""" , __magic_name__ , )
try:
# 2. Load model file as usual
        model_file = hf_hub_download(
            pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""" ) | 116 | 1 |
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE( metaclass=DummyObject ):
    _backends = ["torch", "scipy"]
def __init__( self: Dict , *UpperCamelCase: List[Any] , **UpperCamelCase: str ) -> List[str]:
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def lowerCAmelCase_ ( cls: str , *UpperCamelCase: Dict , **UpperCamelCase: Dict ) -> Tuple:
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def lowerCAmelCase_ ( cls: Optional[int] , *UpperCamelCase: Optional[Any] , **UpperCamelCase: List[str] ) -> Optional[Any]:
requires_backends(cls , ['torch', 'scipy'] )
| 307 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__UpperCamelCase : Any = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : List[str] = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
lowerCAmelCase : Optional[int] = 192
lowerCAmelCase : Optional[int] = 768
lowerCAmelCase : Dict = 12
lowerCAmelCase : Optional[int] = 3
elif vit_name[9:].startswith('''small''' ):
lowerCAmelCase : Union[str, Any] = 384
lowerCAmelCase : Optional[int] = 1536
lowerCAmelCase : Optional[int] = 12
lowerCAmelCase : List[Any] = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
lowerCAmelCase : Optional[Any] = 768
lowerCAmelCase : str = 2304
lowerCAmelCase : Dict = 8
lowerCAmelCase : List[str] = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
lowerCAmelCase : Union[str, Any] = 1024
lowerCAmelCase : Optional[int] = 4096
lowerCAmelCase : Any = 24
lowerCAmelCase : Optional[Any] = 16
elif vit_name[4:].startswith('''huge''' ):
lowerCAmelCase : Dict = 1280
lowerCAmelCase : Optional[Any] = 5120
lowerCAmelCase : Optional[int] = 32
lowerCAmelCase : Optional[int] = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
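# Editor's sketch of the fused-QKV split performed in read_in_q_k_v above,
# with a toy hidden size of 4 standing in for config.hidden_size:
#
#   import torch
#   qkv = torch.arange(12 * 4).reshape(12, 4).float()  # fused (3 * hidden, hidden) weight
#   q, k, v = qkv[:4, :], qkv[4:8, :], qkv[-4:, :]     # same slices as the code above
#   assert q.shape == k.shape == v.shape == (4, 4)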
| 314 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log( folder_path : str ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , '''git_log.json''' ) , '''w''' ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params( params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['''WORLD_SIZE'''] )
        params.n_gpu_per_node = int(os.environ['''N_GPU_NODE'''] )
        params.global_rank = int(os.environ['''RANK'''] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['''N_NODES'''] )
        assert params.node_id == int(os.environ['''NODE_RANK'''] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed( args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
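# Editor's illustration of the environment init_gpu_params expects for a
# 2-node x 4-GPU job (exact values depend on your launcher, e.g. torch.distributed.launch):
#   WORLD_SIZE=8  N_GPU_NODE=4  RANK in 0..7  N_NODES=2  NODE_RANK in {0, 1}
# from which it derives n_nodes = 8 // 4 = 2 and node_id = global_rank // 4.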
| 314 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score as fa_score, matthews_corrcoef
import datasets
lowercase_ = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
lowercase_ = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
lowercase_ = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy ( preds , labels ):
return float((preds == labels).mean() )
def acc_and_fa ( preds , labels ):
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
return {
"accuracy": acc,
"f1": fa,
}
def pearson_and_spearman ( preds , labels ):
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info ( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute ( self , predictions , references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 45 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day ( year : int , month : int , day : int ) -> str:
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
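# Editor's hand trace for get_week_day(2020, 10, 24): century_anchor = 2,
# centurian = 20, dooms_day = (1 + 8 + 2 + 2) % 7 = 6; 2020 is a leap year so
# day_anchor = DOOMSDAY_LEAP[9] = 3, giving (6 + 24 - 3) % 7 = 6 -> 'Saturday'.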
| 45 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """vision-encoder-decoder"""
    is_composition = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f'''A configuration of type {self.model_type} cannot be instantiated because '''
                f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs ( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ):
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict ( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs ( self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation ( self ):
        return 1e-4
    @property
    def outputs ( self ):
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class VisionEncoderDecoderDecoderOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs ( self ):
        common_inputs = OrderedDict()
        common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs
    def generate_dummy_inputs ( self , tokenizer : "PreTrainedTokenizerBase" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , ):
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch, encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["""input_ids"""] = dummy_input.pop("""input_ids""" )
        common_inputs["""attention_mask"""] = dummy_input.pop("""attention_mask""" )
        common_inputs["""encoder_hidden_states"""] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class VisionEncoderDecoderOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs ( self ):
        pass
    def get_encoder_config ( self , encoder_config : PretrainedConfig ):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config ( self , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , feature : str = "default" ):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
| 223 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase_ : str = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset ( IterableDataset ):
    '''simple docstring'''
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
def __iter__( self : Tuple ):
        prompts = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() )
UpperCamelCase_: List[str] = self.tokenizer(snake_case_ , padding=snake_case_ , return_tensors="""pt""" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria ( StoppingCriteria ):
    '''simple docstring'''
    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self , input_ids , scores , **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string ):
    string_list = re.split("""(%s)""" % """|""".join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    gen_token_dict = defaultdict(list ) # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["""stopping_criteria"""][0].start_length = batch["""ids"""].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch["""ids"""][:, : batch["""input_len"""]] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["""task_id"""].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["""HF_ALLOW_CODE_EVAL"""] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["""TOKENIZERS_PARALLELISM"""] = """false"""
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
    gen_kwargs = {
"""do_sample""": args.do_sample,
"""temperature""": args.temperature,
"""max_new_tokens""": args.max_new_tokens,
"""top_p""": args.top_p,
"""top_k""": args.top_k,
"""stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0 , lowerCamelCase , lowerCamelCase )] ),
}
    # Load evaluation dataset and metric
    human_eval = load_dataset("""openai_humaneval""" )
    code_eval_metric = load_metric("""code_eval""" )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval["""test"""] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCamelCase_: List[str] = code_eval_metric.compute(references=[""""""] , predictions=[[""""""]] )
except ValueError as exception:
print(
"""Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
""" flag to enable code evaluation.""" )
raise exception
    model, human_eval_loader = accelerator.prepare(model , human_eval_loader )
    generations = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
if accelerator.is_main_process:
UpperCamelCase_: List[Any] = []
for task in tqdm(range(lowerCamelCase ) ):
UpperCamelCase_: Optional[Any] = human_eval["""test"""][task]["""test"""]
UpperCamelCase_: Optional[int] = F'''check({human_eval["test"][task]["entry_point"]})'''
references.append("""\n""" + test_func + """\n""" + entry_point )
# Evaluate completions with "code_eval" metric
UpperCamelCase_, UpperCamelCase_: str = code_eval_metric.compute(
references=lowerCamelCase , predictions=lowerCamelCase , num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , """w""" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
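# Example invocation (a sketch: the script name and the concrete values are hypothetical,
# but every flag maps to a field of HumanEvalArguments consumed in main() above):
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot \
#       --do_sample --temperature 0.2 --top_p 0.95 \
#       --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1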
"""Convert pretrained PyTorch checkpoints to their TensorFlow 2.0 equivalents."""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f" Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f" Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f" Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
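# Minimal usage sketch (assumes "bert-base-uncased" is available as a shortcut name in
# the BERT archive maps above): convert one checkpoint and compare TF/PT outputs.
#
#   convert_pt_checkpoint_to_tf(
#       model_type="bert",
#       pytorch_checkpoint_path="bert-base-uncased",
#       config_file="bert-base-uncased",
#       tf_dump_path="./bert-base-uncased-tf_model.h5",
#       compare_with_pt_model=True,
#   )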
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
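# Worked trace (sketch): for [1, 5, 3, 7] the pair (max_including, max_excluding)
# evolves (1, 0) -> (5, 1) -> (4, 5) -> (12, 5), i.e. the best non-adjacent picks
# are 5 and 7.
assert maximum_non_adjacent_sum([1, 5, 3, 7]) == 12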
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Fetch the current weather; the arguments are forwarded as query parameters."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Fetch the weather forecast for a location."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Fetch combined weather data for a latitude/longitude pair."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
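# Usage sketch (requires a valid APPID above): each helper returns the parsed JSON from
# OpenWeatherMap directly, so a call such as current_weather("Chicago") yields a dict
# whose exact keys (e.g. "weather", "main") are defined by the OpenWeatherMap API, not
# by this module.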
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# flake8: noqa
# Lint as: python3

__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by inspecting the least significant bit at each shift."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark both popcount implementations on a few sample numbers."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
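# Quick agreement check (sketch): 25 == 0b11001 has three set bits, so both
# implementations must return 3.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3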
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def sylvester(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence: 2, 3, 7, 43, 1807, ..."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
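# Quick check (sketch): the first terms are 2, 3, 7, 43, 1807, matching the recurrence
# s(n) = s(n - 1)^2 - s(n - 1) + 1.
assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]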
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    left, right = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > right else min(length[left + right - j] // 2, right - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is, right)?
        # if yes, update right to the last index of this palindrome
        if j + k - 1 > right:
            left = j - k + 1
            right = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
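# Even-length sketch: "abba" becomes "a|b|b|a", so even palindromes acquire an
# odd-length mirror centred on a "|" character; the recovered answer is the full string.
assert palindromic_string("abba") == "abba"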
"""
primelib: a small collection of number-theory helpers (primality testing, sieving,
factorization, gcd/lcm, Goldbach decomposition, divisors, fractions, factorial, Fibonacci).
"""

from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if 'number' is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 up to n, using trial division."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to n+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of 'number' as an ascending list."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the greatest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number: int) -> bool:
    """Return True if 'number' is even."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if 'number' is odd."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    """Goldbach's assumption: return two primes whose sum is the even 'number'."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes, and their sum must equal 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: return the greatest common divisor of the inputs."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (German: kgV) of the two inputs."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime number, with get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the two prime arguments."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans


def get_divisors(n: int) -> list:
    """Return all divisors of n, inclusive of 1 and n."""
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Return True if 'number' equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce a fraction to lowest terms and return (numerator, denominator)."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! computed iteratively."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number, with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
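# Spot checks (a minimal sketch tying several helpers together; runs on import):
assert is_prime(97) and not is_prime(1)
assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
assert gcd(48, 36) == 12 and kg_v(8, 10) == 40
assert goldbach(28) == [5, 23]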
"""simple docstring"""
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Clean the model-doc table of contents: deduplicate entries and sort them by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
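# Behavior sketch: duplicated "local" entries sharing a single title collapse into one
# item, and the cleaned list is sorted by title, case-insensitively. For instance,
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#   ])
#
# returns [{"local": "model_doc/albert", "title": "ALBERT"},
#          {"local": "model_doc/bert", "title": "BERT"}].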
"""simple docstring"""
from typing import Dict, Optional

import numpy as np

import datasets


_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"


def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union between one prediction and one ground-truth map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union over a list of predictions."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy over a set of maps."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
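# Worked example (sketch): with num_labels=2 and an ignore_index that never occurs
# (e.g. 255), take pred = [[1, 1], [0, 1]] and label = [[1, 0], [0, 1]]. Class 1 has
# intersection 2 and union 3 (IoU 2/3); class 0 has intersection 1 and union 2
# (IoU 1/2); so mean_iou = (2/3 + 1/2) / 2 = 7/12.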
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
        Hidden states output of the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
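# Shape sketch (assumed config defaults from the signature above): the model maps a
# (batch, in_channels, sample_size) tensor to a sample with the same layout, e.g.
#
#   model = UNet1DModel()  # in_channels=2, out_channels=2
#   out = model(torch.randn(1, 2, 65536), timestep=10).sample  # -> (1, 2, 65536)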
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = None
__snake_case = None
@property
def UpperCamelCase__ ( self ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCamelCase__ ( self ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''padding_value''' ) )
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
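    # Editor's worked example of the multiple-of arithmetic in `_check_padding`:
    # with pad_max_length = 37 and pad_to_multiple_of = 10, sequences are padded
    # to (37 // 10 + 1) * 10 == 40; with pad_max_length = 40 they stay at 40.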
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        ) | 267 | 1 |
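# Editor's sketch of how the mixin above is consumed by a concrete test file
# (the Wav2Vec2 names are assumed for illustration, not from the original):
#
#   class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
#       feature_extraction_class = Wav2Vec2FeatureExtractor
#
#       def setUp(self):
#           self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)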
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train and validation `DataLoader`s for GLUE MRPC with a BERT tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
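    # Editor's note: `find_executable_batch_size` re-invokes `inner_training_loop`
    # whenever it raises a CUDA out-of-memory error, halving the batch size on each
    # retry (e.g. 16 -> 8 -> 4 -> ...) until the loop finishes or the size hits 0.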
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 118 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 37 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """Convert a torch image tensor with values in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC, values in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
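# Editor's sketch of the conversion pipeline above (assumes NCHW float tensors in
# [-1, 1], the layout diffusers models produce; not part of the original file):
#
#   import torch
#   images = torch.rand(2, 3, 64, 64) * 2 - 1
#   pil_images = pt_to_pil(images)  # -> list of two 64x64 RGB PIL images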
| 193 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
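# Editor's example of the JSON shape this script expects (inferred from the code
# above; the values are made up):
#
#   {"benchmarks/benchmark_map.json": {"time": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
#
# renders a "### Benchmark: benchmark_map.json" section with the markdown table
# "| metric | time |" / "| new / old (diff) | 1.200000 / 1.500000 (-0.300000) |".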
| 193 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
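# Editor's note: these Flax blocks are channels-last (NHWC), unlike their PyTorch
# counterparts; FlaxUpsample2D doubles the spatial dims via `jax.image.resize`
# before the 3x3 convolution. A minimal shape check (editor addition):
#
#   up = FlaxUpsample2D(out_channels=8)
#   x = jnp.ones((1, 16, 16, 8))
#   params = up.init(jax.random.PRNGKey(0), x)
#   up.apply(params, x).shape  # (1, 32, 32, 8)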
| 257 |
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Wraps two `Transformer2DModel`s and blends their outputs for dual conditioning."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
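# Editor's note: the defaults above match Versatile Diffusion's dual conditioning,
# where the 77 tokens are a CLIP text sequence and the 257 tokens are CLIP image
# patch embeddings; `mix_ratio` linearly blends the two transformers' residuals.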
| 257 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, testing divisors up to its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count the primes below `max_prime` that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
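# Editor's note: the candidates 7, 19, 37, 61, ... are differences of consecutive
# cubes, (n + 1)**3 - n**3 == 3*n**2 + 3*n + 1, generated incrementally by
# `prime_candidate += 6 * cube_index` (the "cuban primes"; cf. Project Euler 131).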
| 121 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
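# Editor's note: `ratio_char_token` (characters per token) is a cheap quality
# signal; in CodeParrot-style preprocessing it is typically used downstream to
# filter files that tokenize unusually densely, e.g. autogenerated code.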
| 121 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
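    # Editor's note: the 3x3 logit slices above are regression values recorded from
    # the released openmmlab checkpoints; atol=1e-4 absorbs small numerical drift
    # across hardware and torch versions.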
| 99 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__SCREAMING_SNAKE_CASE =ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
__SCREAMING_SNAKE_CASE =ap.parse_args()
__SCREAMING_SNAKE_CASE =Path(args.readme_filepath)
__SCREAMING_SNAKE_CASE =DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
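# Editor's example: the loader subclass above turns duplicate YAML keys, which
# `yaml.safe_load` silently collapses, into a hard error:
#
#   yaml.load("a: 1\na: 2", Loader=_NoDuplicateSafeLoader)  # raises TypeError
#   yaml.safe_load("a: 1\na: 2")                            # returns {"a": 2}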
| 213 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
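    # Editor's note on the toy vocab in `setUp`: "lower" tokenizes to
    # ["low", "er</w>"] because BPE first merges "l o" -> "lo" and "lo w" -> "low",
    # then "e r</w>" -> "er</w>" closes the word (vocab ids 14 and 15).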
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Runs the same tests with `ftfy` and `spacy` installed (the alternative normalization path)."""

    pass | 352 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any() | 307 | 0 |
"""simple docstring"""
__UpperCAmelCase = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCAmelCase = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _snake_case ( lowercase__ : dict[int, list[int]] , lowercase__ : int , lowercase__ : list[bool] ) -> list[int]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = True
lowerCAmelCase_ :Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(lowercase__ , lowercase__ , lowercase__ )
order.append(lowercase__ )
return order
def _snake_case ( lowercase__ : dict[int, list[int]] , lowercase__ : int , lowercase__ : list[bool] ) -> list[int]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = True
lowerCAmelCase_ :Optional[Any] = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(lowercase__ , lowercase__ , lowercase__ )
return component
def _snake_case ( lowercase__ : dict[int, list[int]] ) -> list[list[int]]:
'''simple docstring'''
lowerCAmelCase_ :str = len(lowercase__ ) * [False]
lowerCAmelCase_ :dict[int, list[int]] = {vert: [] for vert in range(len(lowercase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(lowercase__ )
lowerCAmelCase_ :Any = []
for i, was_visited in enumerate(lowercase__ ):
if not was_visited:
order += topology_sort(lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Dict = []
lowerCAmelCase_ :Optional[Any] = len(lowercase__ ) * [False]
for i in range(len(lowercase__ ) ):
lowerCAmelCase_ :str = order[len(lowercase__ ) - i - 1]
if not visited[vert]:
lowerCAmelCase_ :List[Any] = find_components(lowercase__ , lowercase__ , lowercase__ )
components_list.append(lowercase__ )
return components_list
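# Added usage sketch (not part of the original module): Kosaraju's algorithm on
# the second sample graph above; the cycles 0 -> 1 -> 2 -> 0 and 3 -> 4 -> 5 -> 3
# form the two strongly connected components.
if __name__ == "__main__":
    components = strongly_connected_components(test_graph_2)
    print(components)  # two components covering {0, 1, 2} and {3, 4, 5}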
"""simple docstring"""
import datasets
_a = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_a = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_a = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the reference labels."""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        embed_dim: int = 128,
        depths=[2, 2, 2, 2],
        resolution: int = 2,
        dim: int = 3,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size]
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    """Load the standard COCO cats test image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""
Project Euler Problem 493: https://projecteuler.net/problem=493

70 coloured balls are placed in an urn, 10 for each of the seven rainbow
colours. What is the expected number of distinct colours in 20 randomly
picked balls?
"""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours, by linearity of expectation: each
    colour is entirely missing from the picked balls with probability
    C(NUM_BALLS - BALLS_PER_COLOUR, num_picked) / C(NUM_BALLS, num_picked)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
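# Added cross-check (not part of the original solution). A quick Monte Carlo
# estimate using only the standard library should land near the closed-form
# value of about 6.8187:
if __name__ == "__main__":
    import random

    urn = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    trials = 10_000
    simulated = sum(len(set(random.sample(urn, 20))) for _ in range(trials)) / trials
    print(f"closed form: {solution(20)}, simulated: {simulated:.3f}")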
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
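# Added usage sketch (not part of the original file). The module above uses
# relative imports, so instantiate the config through the public API instead:
#
#   from transformers import MegatronBertConfig
#
#   config = MegatronBertConfig(num_hidden_layers=2, hidden_size=64)
#   print(config.model_type)          # "megatron-bert"
#   print(config.num_hidden_layers)   # 2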
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Semantic Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`): the denoised images.
        nsfw_content_detected (`Optional[List[bool]]`): per-image NSFW flags,
            or `None` if the safety checker is disabled.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
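# Added usage sketch (not part of the original __init__). Loading the pipeline
# through the public diffusers API; checkpoint, prompt, and editing parameters
# below are illustrative and the weights must be downloaded first:
#
#   import torch
#   from diffusers import SemanticStableDiffusionPipeline
#
#   pipe = SemanticStableDiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
#   ).to("cuda")
#   out = pipe("a photo of a cat", editing_prompt=["smiling"])
#   image = out.images[0]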
"""
Generate the rows of Pascal's triangle, either cell by cell or, in the
optimized version, by exploiting the left-right symmetry of each row.
"""


def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle keeping only the previous row and using symmetry."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
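# Added usage sketch (not part of the original module). Note that running the
# file directly executes the slow benchmark above first; importing the module
# and calling the generators is the quicker way to see the rows:
if __name__ == "__main__":
    print(generate_pascal_triangle(5))
    # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
    print(generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5))  # True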
"""simple docstring"""
from copy import deepcopy
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ = None , snake_case_ = None ):
"""simple docstring"""
if arr is None and size is not None:
A_ : Union[str, Any] = size
A_ : List[str] = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = len(snake_case_ )
A_ : Optional[int] = deepcopy(snake_case_ )
for i in range(1 , self.size ):
A_ : Optional[Any] = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
A_ : Optional[int] = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index - (index & (-index))
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
A_ : List[str] = self.next_(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
self.add(snake_case_ , value - self.get(snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if right == 0:
return 0
A_ : Any = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
A_ : Tuple = self.prev(snake_case_ )
return result
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
return self.query(snake_case_ , index + 1 )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
A_ : List[Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
A_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
    doctest.testmod()
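# Added usage sketch (not part of the original module): prefix sums, point
# updates, and range queries on a small array.
if __name__ == "__main__":
    f = FenwickTree(arr=[1, 2, 3, 4, 5])
    print(f.prefix(3))    # 1 + 2 + 3 == 6
    f.add(2, 10)          # array becomes [1, 2, 13, 4, 5]
    print(f.query(1, 4))  # 2 + 13 + 4 == 19
    print(f.get_array())  # [1, 2, 13, 4, 5]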
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
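# Added example invocation (not part of the original script; hyper-parameters
# and paths are illustrative, adapted from the examples documentation):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de \
#       --train_language en \
#       --do_train \
#       --do_eval \
#       --per_device_train_batch_size 32 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 2.0 \
#       --max_seq_length 128 \
#       --output_dir /tmp/debug_xnli \
#       --save_steps -1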
"""Haversine distance between two points on the surface of the WGS84 spheroid."""
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in metres between two points given
    their latitudes and longitudes in degrees
    (https://en.wikipedia.org/wiki/Haversine_formula).
    """
    # Constants per WGS84: https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
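# Added worked example (not part of the original module): San Francisco
# (37.774856, -122.424227) to Yosemite (37.864742, -119.537521); the haversine
# figure for this pair is roughly 254 km.
if __name__ == "__main__":
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.2f} km")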
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
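# Usage is unchanged by the laziness (sketch): `from transformers import GroupViTModel`
# resolves through the _LazyModule and only then imports the torch-backed
# implementation, keeping the initial `import transformers` cheap.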
| 34 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
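# Example invocation, mirroring the header comment (run from the repository root):
#   python ./utils/get_modified_files.py utils src tests examples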
| 347 | 0 |
'''simple docstring'''
import os
def solution():
    """
    Finds the maximum total in a triangle of numbers (Project Euler's maximum
    path sum problem) using a bottom-up dynamic programme over triangle.txt.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
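# To see the bottom-up pass on a small, made-up triangle (not from triangle.txt):
#
#     3          a = [[3], [7, 4], [2, 4, 6]]
#     7 4        after row 2: [10, 7]
#     2 4 6      after row 3: [12, 14, 13]  ->  solution() would return 14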
| 369 |
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"
    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"
    return number % 2 != 0


def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans


def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"
    return number1


def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"
    return ans


def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
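# A few usage sketches for the helpers above (all values can be checked by hand):
# >>> is_prime(97)
# True
# >>> prime_factorization(40)
# [2, 2, 2, 5]
# >>> goldbach(28)
# [5, 23]
# >>> kg_v(24, 36)
# 72
# >>> fib(10)
# 89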
| 52 | 0 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
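# Example invocation (the script filename and the output directory are placeholders,
# adjust them to your checkout):
#   python convert_sam_to_hf.py --model_name sam_vit_h_4b8939 --pytorch_dump_folder_path ./sam-vit-huge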
| 47 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ):
return len(self.src_lens )
    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path):
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
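# Usage sketch for the metric helpers (made-up strings): normalization lowercases,
# strips punctuation and articles, so both calls below score perfectly.
# >>> f1_score("the cat sat", "a cat sat!")
# 1.0
# >>> calculate_exact_match(["Paris"], ["paris."])
# {'em': 1.0}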
| 261 | 0 |
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """
    ELU activation: returns x unchanged when x > 0 and
    alpha * (exp(x) - 1) otherwise.
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
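# Usage sketch (illustrative inputs; outputs rounded): positive entries pass
# through unchanged, negatives are squashed toward -alpha.
# >>> exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), 0.3)
# roughly array([2.3, 0.6, -0.2594, -0.2933])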
| 362 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 177 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
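# Example invocation (the filename and all paths are placeholders for an actual
# TF checkpoint layout):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5_tf_ckpt --config_file ./config.json --pytorch_dump_path ./t5_pt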
| 48 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
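# Minimal denoising-loop sketch for the scheduler under test; the noise predictor
# below is a random stand-in, not a trained UNet:
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = torch.randn_like(sample)  # a real pipeline calls unet(sample, t).sample
#       sample = scheduler.step(residual, t, sample).prev_sample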
| 323 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1_000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
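# Example invocation (the URL is the argparse default above; the script filename
# and dump path are placeholders):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small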
| 323 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
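# Example invocation (the script filename and all paths are placeholders for a
# fairseq-style HiFi-GAN checkpoint plus its stats.npy file):
#   python convert_hifigan.py --checkpoint_path ./hifigan.ckpt --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5-hifigan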
| 139 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
    get_all_tweets("FirePing32")
| 97 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
__snake_case : Optional[Any] = '''not installed'''
if is_safetensors_available():
import safetensors
__snake_case : Any = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
__snake_case : Any = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
__snake_case : Any = '''not installed'''
__snake_case : str = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__snake_case : int = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_lowerCAmelCase ):
__snake_case : Optional[int] = load_config_from_file(self._accelerate_config_file ).to_dict()
__snake_case : str = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else f'''\t{accelerate_config}'''
)
__snake_case : str = '''not installed'''
__snake_case : Union[str, Any] = '''NA'''
if is_torch_available():
import torch
__snake_case : Tuple = torch.__version__
__snake_case : int = torch.cuda.is_available()
__snake_case : Union[str, Any] = '''not installed'''
__snake_case : List[Any] = '''NA'''
if is_tf_available():
import tensorflow as tf
__snake_case : int = tf.__version__
try:
# deprecated in v2.1
__snake_case : Tuple = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__snake_case : int = bool(tf.config.list_physical_devices("""GPU""" ) )
__snake_case : Union[str, Any] = '''not installed'''
__snake_case : Union[str, Any] = '''not installed'''
__snake_case : int = '''not installed'''
__snake_case : Any = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
__snake_case : List[str] = flax.__version__
__snake_case : Any = jax.__version__
__snake_case : Optional[Any] = jaxlib.__version__
__snake_case : Any = jax.lib.xla_bridge.get_backend().platform
__snake_case : Union[str, Any] = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f'''{safetensors_version}''',
'''Accelerate version''': f'''{accelerate_version}''',
'''Accelerate config''': f'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': f'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': f'''{flax_version} ({jax_backend})''',
'''Jax version''': f'''{jax_version}''',
'''JaxLib version''': f'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(_lowerCAmelCase ) )
return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 351 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 15_36,
"junnyu/roformer_chinese_base": 15_36,
"junnyu/roformer_chinese_char_small": 5_12,
"junnyu/roformer_chinese_char_base": 5_12,
"junnyu/roformer_small_discriminator": 1_28,
"junnyu/roformer_small_generator": 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
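    # Illustrative behaviour of the two sequence-building methods above, with
    # made-up special-token ids cls=101 and sep=102:
    #   build_inputs_with_special_tokens([7, 8], [9])      -> [101, 7, 8, 102, 9, 102]
    #   create_token_type_ids_from_sequences([7, 8], [9])  -> [0, 0, 0, 0, 1, 1]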
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under Shortest Remaining Time First."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
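# Hand-checked example for the function above: arrival_time = [0, 1, 2] and
# burst_time = [3, 1, 2] give waiting_time = [1, 0, 2] -- the short job arriving
# at t=1 preempts the first process, which resumes once the queue allows it.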
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is the sum of a process's burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 78 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Union[str, Any] ) -> str:
UpperCAmelCase = {}
def UpperCAmelCase__ ( self :Any , lowercase_ :List[Any] , lowercase_ :List[str] , lowercase_ :Dict=1 ) -> List[Any]:
if self.graph.get(lowercase_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCAmelCase = [[w, v]]
if not self.graph.get(lowercase_ ):
UpperCAmelCase = []
def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
return list(self.graph )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[Any] ) -> Dict:
if self.graph.get(lowercase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase_ )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :Tuple=-2 , lowercase_ :List[Any]=-1 ) -> List[Any]:
if s == d:
return []
UpperCAmelCase = []
UpperCAmelCase = []
if s == -2:
UpperCAmelCase = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase_ ) != 0:
UpperCAmelCase = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowercase_ ) == 0:
return visited
def UpperCAmelCase__ ( self :List[str] , lowercase_ :int=-1 ) -> Tuple:
if c == -1:
UpperCAmelCase = floor(random() * 1_00_00 ) + 10
for i in range(lowercase_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase_ , lowercase_ , 1 )
def UpperCAmelCase__ ( self :Tuple , lowercase_ :Optional[Any]=-2 ) -> Optional[int]:
UpperCAmelCase = deque()
UpperCAmelCase = []
if s == -2:
UpperCAmelCase = list(self.graph )[0]
d.append(lowercase_ )
visited.append(lowercase_ )
while d:
UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[int] ) -> List[Any]:
UpperCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[str] ) -> List[str]:
return len(self.graph[u] )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Any=-2 ) -> int:
UpperCAmelCase = []
UpperCAmelCase = []
if s == -2:
UpperCAmelCase = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase = s
UpperCAmelCase = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowercase_ ) != 0:
UpperCAmelCase = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowercase_ ) == 0:
return sorted_nodes
def UpperCAmelCase__ ( self :str ) -> str:
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase = -2
UpperCAmelCase = []
UpperCAmelCase = s
UpperCAmelCase = False
UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase = len(lowercase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase = True
if len(lowercase_ ) != 0:
UpperCAmelCase = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase = False
indirect_parents.append(lowercase_ )
UpperCAmelCase = s
UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowercase_ ) == 0:
return list(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> Tuple:
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase = -2
UpperCAmelCase = []
UpperCAmelCase = s
UpperCAmelCase = False
UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase = len(lowercase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase = True
if len(lowercase_ ) != 0:
UpperCAmelCase = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase = False
indirect_parents.append(lowercase_ )
UpperCAmelCase = s
UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowercase_ ) == 0:
return False
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int=-2 , lowercase_ :List[str]=-1 ) -> Any:
UpperCAmelCase = time()
self.dfs(lowercase_ , lowercase_ )
UpperCAmelCase = time()
return end - begin
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[str]=-2 ) -> str:
UpperCAmelCase = time()
self.bfs(lowercase_ )
UpperCAmelCase = time()
return end - begin
class A_ :
"""simple docstring"""
def __init__( self :List[str] ) -> Union[str, Any]:
UpperCAmelCase = {}
def UpperCAmelCase__ ( self :str , lowercase_ :Dict , lowercase_ :Optional[Any] , lowercase_ :Optional[int]=1 ) -> Dict:
        # check if u already exists
if self.graph.get(lowercase_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCAmelCase = [[w, v]]
# add the other way
if self.graph.get(lowercase_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
UpperCAmelCase = [[w, u]]
def UpperCAmelCase__ ( self :Any , lowercase_ :Union[str, Any] , lowercase_ :Tuple ) -> Optional[Any]:
if self.graph.get(lowercase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase_ )
# the other way round
if self.graph.get(lowercase_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Optional[int]=-2 , lowercase_ :Optional[int]=-1 ) -> List[str]:
if s == d:
return []
UpperCAmelCase = []
UpperCAmelCase = []
if s == -2:
UpperCAmelCase = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase_ ) != 0:
UpperCAmelCase = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowercase_ ) == 0:
return visited
def UpperCAmelCase__ ( self :List[str] , lowercase_ :Optional[int]=-1 ) -> Any:
if c == -1:
UpperCAmelCase = floor(random() * 1_00_00 ) + 10
for i in range(lowercase_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase_ , lowercase_ , 1 )
def UpperCAmelCase__ ( self :Dict , lowercase_ :int=-2 ) -> int:
UpperCAmelCase = deque()
UpperCAmelCase = []
if s == -2:
UpperCAmelCase = list(self.graph )[0]
d.append(lowercase_ )
visited.append(lowercase_ )
while d:
UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[Any] ) -> str:
return len(self.graph[u] )
def UpperCAmelCase__ ( self :Optional[Any] ) -> Any:
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase = -2
UpperCAmelCase = []
UpperCAmelCase = s
UpperCAmelCase = False
UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase = len(lowercase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase = True
if len(lowercase_ ) != 0:
UpperCAmelCase = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase = False
indirect_parents.append(lowercase_ )
UpperCAmelCase = s
UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowercase_ ) == 0:
return list(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> str:
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase = -2
UpperCAmelCase = []
UpperCAmelCase = s
UpperCAmelCase = False
UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase = len(lowercase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase = True
if len(lowercase_ ) != 0:
UpperCAmelCase = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase = False
indirect_parents.append(lowercase_ )
UpperCAmelCase = s
UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowercase_ ) == 0:
return False
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Union[str, Any]:
return list(self.graph )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Union[str, Any]=-2 , lowercase_ :List[str]=-1 ) -> str:
UpperCAmelCase = time()
self.dfs(lowercase_ , lowercase_ )
UpperCAmelCase = time()
return end - begin
def UpperCAmelCase__ ( self :Any , lowercase_ :int=-2 ) -> str:
UpperCAmelCase = time()
self.bfs(lowercase_ )
UpperCAmelCase = time()
return end - begin
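# Minimal, self-contained sketch of the iterative DFS pattern the classes above
# rely on (names here are illustrative; the obfuscated originals do not expose
# stable method names). It walks an adjacency-list dict with an explicit stack,
# where each edge is stored as a [weight, vertex] pair as in the classes above.
def dfs_sketch(graph: dict, start) -> list:
    stack = [start]
    visited = []
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # push neighbours in reverse so the first-listed edge is explored first
            stack.extend(v for _, v in reversed(graph.get(node, [])))
    return visited

# Example: dfs_sketch({1: [[1, 2], [1, 3]], 2: [[1, 4]], 3: [], 4: []}, 1) -> [1, 2, 4, 3]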
| 78 | 1 |
'''simple docstring'''
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = int((h - i + 1) / 3)

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h))

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
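# Illustrative check: stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].
# Stooge sort runs in roughly O(n^2.71), so it is a teaching curiosity rather
# than a practical sorting routine.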
| 361 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Any = 0
def UpperCAmelCase_ ( self ) -> Dict:
A_ : str = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Optional[int] = Path(_lowerCamelCase ) / """preprocessor_config.json"""
A_ : List[str] = Path(_lowerCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_lowerCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_lowerCamelCase , """w""" ) )
A_ : Dict = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[str] = Path(_lowerCamelCase ) / """preprocessor_config.json"""
A_ : Tuple = Path(_lowerCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_lowerCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_lowerCamelCase , """w""" ) )
A_ : Dict = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : int = CLIPConfig()
            # Create a dummy config file with image_processor_type
A_ : Dict = Path(_lowerCamelCase ) / """preprocessor_config.json"""
A_ : Any = Path(_lowerCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_lowerCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_lowerCamelCase , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
A_ : str = AutoImageProcessor.from_pretrained(_lowerCamelCase ).to_dict()
config_dict.pop("""image_processor_type""" )
A_ : Dict = CLIPImageProcessor(**_lowerCamelCase )
# save in new folder
model_config.save_pretrained(_lowerCamelCase )
config.save_pretrained(_lowerCamelCase )
A_ : Any = AutoImageProcessor.from_pretrained(_lowerCamelCase )
# make sure private variable is not incorrectly saved
A_ : int = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = Path(_lowerCamelCase ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_lowerCamelCase , """w""" ) , )
A_ : Tuple = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
with self.assertRaisesRegex(
_lowerCamelCase , """clip-base is not a local folder and is not a valid model identifier""" ):
A_ : Optional[Any] = AutoImageProcessor.from_pretrained("""clip-base""" )
def UpperCAmelCase_ ( self ) -> Dict:
with self.assertRaisesRegex(
_lowerCamelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Union[str, Any] = AutoImageProcessor.from_pretrained(_lowerCamelCase , revision="""aaaaaa""" )
def UpperCAmelCase_ ( self ) -> Any:
with self.assertRaisesRegex(
_lowerCamelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
A_ : List[Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCAmelCase_ ( self ) -> Dict:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowerCamelCase ):
A_ : int = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCamelCase ):
A_ : List[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_lowerCamelCase )
A_ : str = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowerCamelCase )
A_ : Union[str, Any] = AutoImageProcessor.from_pretrained(_lowerCamelCase , trust_remote_code=_lowerCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def UpperCAmelCase_ ( self ) -> Dict:
try:
AutoConfig.register("""custom""" , _lowerCamelCase )
AutoImageProcessor.register(_lowerCamelCase , _lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCamelCase ):
AutoImageProcessor.register(_lowerCamelCase , _lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Optional[Any] = Path(_lowerCamelCase ) / """preprocessor_config.json"""
A_ : Dict = Path(_lowerCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_lowerCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_lowerCamelCase , """w""" ) )
A_ : Optional[int] = CustomImageProcessor.from_pretrained(_lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowerCamelCase )
A_ : List[str] = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self ) -> List[str]:
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = True
try:
AutoConfig.register("""custom""" , _lowerCamelCase )
AutoImageProcessor.register(_lowerCamelCase , _lowerCamelCase )
# If remote code is not set, the default is to use local
A_ : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
A_ : Optional[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
A_ : Optional[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(_lowerCamelCase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 164 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = ['pixel_values']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None:
'''simple docstring'''
super().__init__(**lowercase )
A__ = size if size is not None else {"height": 384, "width": 384}
A__ = get_size_dict(lowercase , default_to_square=lowercase )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A__ = image_std if image_std is not None else OPENAI_CLIP_STD
A__ = do_convert_rgb
def UpperCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(lowercase , default_to_square=lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
A__ = (size["height"], size["width"])
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Optional[Any]:
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image:
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = size if size is not None else self.size
A__ = get_size_dict(lowercase , default_to_square=lowercase )
A__ = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A__ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_rescale:
A__ = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A__ = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A__ = BatchFeature(data={"pixel_values": images} , tensor_type=lowercase )
return encoded_outputs
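    # Minimal usage sketch for the processor above, assuming it is exposed under
    # its upstream name BlipImageProcessor (the class name in this dump is a
    # placeholder):
    #   processor = BlipImageProcessor()                    # defaults: 384x384, rescale 1/255
    #   batch = processor.preprocess(images=pil_image, return_tensors="pt")
    #   batch["pixel_values"].shape                         # -> (1, 3, 384, 384), channels first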
| 68 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase__ = """\
Text data.
Second line of data."""
lowerCAmelCase__ = """file"""
@pytest.fixture(scope="session" )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
A__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" )
with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> List[str]:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Any:
'''simple docstring'''
A__ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
A__ = input_paths[compression_format]
A__ = tmp_path / "cache"
A__ = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ ) as f:
A__ = f.read()
with open(SCREAMING_SNAKE_CASE_ ) as f:
A__ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Dict:
'''simple docstring'''
A__ = "custom_cache"
A__ = "custom_extracted_dir"
A__ = tmp_path / "custom_extracted_path"
if default_extracted:
A__ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , SCREAMING_SNAKE_CASE_ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(SCREAMING_SNAKE_CASE_ ) )
A__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A__ = xz_file
A__ = (
DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
)
A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Optional[int]:
'''simple docstring'''
A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
# relative path
A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[str]:
'''simple docstring'''
A__ = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
# relative path
A__ = "./__missing_file__.txt"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]:
'''simple docstring'''
A__ = get_from_cache(F'tmp://{tmpfs_file}' )
with open(SCREAMING_SNAKE_CASE_ ) as f:
A__ = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> int:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[Any]:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_head("s3://huggingface.co" )
| 68 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
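# Illustrative trace of the renaming above (hypothetical fairseq key, expert_idx=7):
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
#   -> "decoder.layers.3.ffn.experts.expert_7.fc1.weight"
# The fc1/fc2 rewrites do not fire here because the key contains "experts".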
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
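# Shape of the index built by shard_on_the_fly above (illustrative, made-up sizes):
# {
#     "metadata": {"total_size": 220412592128},
#     "weight_map": {
#         "decoder.layers.3.ffn.experts.expert_7.fc1.weight": "pytorch_model-00008-of-00129.bin",
#         ...
#     }
# }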
| 13 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Reconstruct a version string from a (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
| 13 | 1 |
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
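# Example: harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']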
| 93 |
def solution(n: int = 1000000) -> int:
    """Return the starting number below n that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
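# Worked example of one chain counted above: starting at 13,
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 is 10 terms long;
# the memo in `counters` lets every later chain that reaches 13 reuse that count.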
| 48 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
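# Why the helper above exists: dataclasses reject mutable defaults such as [8],
# so list_field hides the list behind a default_factory; each instance of the
# dataclass below then gets its own fresh copy instead of sharing one list object.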
@dataclass
class snake_case_ :
UpperCAmelCase__ : Union[str, Any] = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
UpperCAmelCase__ : Optional[Any] = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
UpperCAmelCase__ : List[Any] = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
UpperCAmelCase__ : Optional[Any] = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
UpperCAmelCase__ : List[str] = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
UpperCAmelCase__ : Dict = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
UpperCAmelCase__ : Dict = field(default=lowerCamelCase_ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
UpperCAmelCase__ : Any = field(default=lowerCamelCase_ , metadata={'''help''': '''Benchmark training of model'''} )
UpperCAmelCase__ : Optional[Any] = field(default=lowerCamelCase_ , metadata={'''help''': '''Verbose memory tracing'''} )
UpperCAmelCase__ : Tuple = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
UpperCAmelCase__ : Optional[Any] = field(
default=lowerCamelCase_ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
UpperCAmelCase__ : Tuple = field(default=lowerCamelCase_ , metadata={'''help''': '''Trace memory line by line'''} )
UpperCAmelCase__ : str = field(default=lowerCamelCase_ , metadata={'''help''': '''Save result to a CSV file'''} )
UpperCAmelCase__ : Tuple = field(default=lowerCamelCase_ , metadata={'''help''': '''Save all print statements in a log file'''} )
UpperCAmelCase__ : List[Any] = field(default=lowerCamelCase_ , metadata={'''help''': '''Whether to print environment information'''} )
UpperCAmelCase__ : List[Any] = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=f'inference_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
UpperCAmelCase__ : Any = field(
default=f'inference_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
UpperCAmelCase__ : Union[str, Any] = field(
default=f'train_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
UpperCAmelCase__ : Optional[Any] = field(
default=f'train_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
UpperCAmelCase__ : Optional[Any] = field(
default=f'env_info_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
UpperCAmelCase__ : str = field(
default=f'log_{round(time() )}.csv' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
UpperCAmelCase__ : str = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
UpperCAmelCase__ : Tuple = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )
    def to_json_string(self) -> str:
return json.dumps(dataclasses.asdict(self ) ,indent=2 )
@property
    def model_names(self) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
return self.models
@property
    def do_multi_processing(self) -> bool:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.' )
return False
else:
return True
| 369 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: given exactly two of the three quantities, solve for the third."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
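# Examples for the function above (doctest.testmod() below finds nothing to run
# because the original doctests were stripped, so these stay as plain comments):
#   ohms_law(voltage=10, current=5, resistance=0)   -> {"resistance": 2.0}
#   ohms_law(voltage=0, current=0, resistance=10)   -> ValueError, two arguments are 0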
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
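# Behaviour of the lazy module set up above (illustrative): importing the package
# only registers the _import_structure mapping; the heavy torch/tf submodules are
# imported on first attribute access, e.g. when ConvNextModel is first looked up.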
| 42 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class lowerCamelCase__(PreTrainedTokenizer):
    """Construct a BigBird tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
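# Minimal usage sketch (the vocab path is illustrative; any trained
# SentencePiece model file would do):
#
#   tokenizer = lowerCamelCase__(vocab_file="spiece.model")
#   ids = tokenizer("Protein language models")["input_ids"]
#   print(tokenizer.decode(ids))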
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the AND truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
    print(and_gate(1, 1))
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """The position ids should be masked with the embedding object's padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """The position ids should be masked with the embedding object's padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
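# Running these tests locally (invocation is illustrative; adjust the path to
# wherever this test module lives in the repo):
#
#   RUN_SLOW=1 python -m pytest tests/models/esm/test_modeling_esm.py -q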
import unittest
from knapsack import knapsack as k
class __a ( unittest.TestCase ):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
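# For reference, a minimal 0/1-knapsack matching the call signature exercised
# above (a sketch; the real `knapsack` module imported as `k` may differ):
#
# def knapsack(capacity, weights, values, counter):
#     if counter == 0 or capacity == 0:
#         return 0
#     if weights[counter - 1] > capacity:
#         # item doesn't fit: skip it
#         return knapsack(capacity, weights, values, counter - 1)
#     # best of taking vs. skipping the item
#     return max(
#         values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
#         knapsack(capacity, weights, values, counter - 1),
#     )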
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Tuple = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A__(PretrainedConfig):
    """Configuration class for a Data2VecAudio model."""

    model_type = "data2vec-audio"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
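# Minimal sanity check (a sketch): the default conv strides (5, 2, 2, 2, 2, 2, 2)
# downsample the raw waveform by a factor of 5 * 2**6 = 320.
#
#   cfg = A__()
#   assert cfg.inputs_to_logits_ratio == 320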
"""simple docstring"""
MOD_ADLER = 65_521


def _lowercase(plain_text: str) -> int:
    """Compute the Adler-32 checksum of an ASCII string.

    >>> hex(_lowercase("Wikipedia"))
    '0x11e60398'
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
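if __name__ == "__main__":
    # Run the embedded doctest above; the "Wikipedia" checksum is the classic
    # test vector from the Adler-32 literature.
    import doctest

    doctest.testmod()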
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """YAML loader that rejects duplicate top-level keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
__A = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__A = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
__A = ap.parse_args()
__A = Path(args.readme_filepath)
__A = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
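# For reference, the README front matter this script validates looks like
# (a sketch; the exact field names depend on the dataset):
#
#   ---
#   annotations_creators: [crowdsourced]
#   language: [en]
#   license: [cc-by-4.0]
#   task_categories: [text-classification]
#   ---
#   # Dataset Card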
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
__lowerCAmelCase : Optional[int] ="" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
__lowerCAmelCase : Tuple =reader.read()
__lowerCAmelCase : Union[str, Any] =json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
                config[value] = config[key]
                del config[key]
        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
            has_changed = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
                    # re-root the parameter key under its new top-level name
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
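# Example invocation (script name and paths are illustrative):
#
#   python convert_unet_naming.py --repo_path ./old-unet-repo --dump_path ./converted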
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : List[Any] ="examples/"
__lowerCAmelCase : Dict ={
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : str ="README.md"
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version of `fname` in place using the replacement pattern named `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version: str):
    """Update the version in all example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version in the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
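# Typical release flow (a sketch; run from the repo root):
#
#   python utils/release.py                  # bump to the release version
#   python utils/release.py --patch          # cut a patch release instead
#   python utils/release.py --post_release   # move main back to a .dev0 version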
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class UpperCAmelCase__(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
'''simple docstring'''
def UpperCAmelCase_(input_string: str) -> str:
    """Return the longest palindromic substring using Manacher's algorithm (O(n)).

    >>> UpperCAmelCase_("abbbaba")
    'abbba'
    >>> UpperCAmelCase_("ababa")
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FPaa = "fp16"
BFaa = "bf16"
dtypes = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3_200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2_000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1_900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                # restart from the shared prefix for every state dict type
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
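# Illustrative (not executed): for the "fsdp_full_shard_transformer_based_wrap"
# config, test_performance above ends up launching roughly:
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 --use_fsdp \
#       --fsdp_sharding_strategy=1 --mixed_precision=fp16 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       test_performance.py --output_dir=<tmpdir> --performance_lower_bound=0.82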
| 333 |
edges: dict = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    """Perform a depth-first based topological sort starting at ``start``."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
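    # For the example graph above this prints ['c', 'd', 'e', 'b', 'a']:
    # children are appended before their parent, so reading the list from
    # right to left gives a valid topological order.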
| 333 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase_ = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
lowerCamelCase_ = f'''https://www.google.com/search?q={query}&num=100'''
lowerCamelCase_ = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
lowerCamelCase_ = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
lowerCamelCase_ = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
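# Usage (illustrative): `python <this_script>.py python list comprehension`
# opens the first organic Google result for the query in the default browser.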
| 351 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
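# Illustrative example of the helper above: resizing a 480x640 image toward
# (384, 384) with keep_aspect_ratio=True and multiple=32 fits the height
# (scale 0.8) and returns (384, 512), since round(0.8 * 640 / 32) * 32 == 512.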
class DPTImageProcessor(BaseImageProcessor):
    """Constructs a DPT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to (size["height"], size["width"]), optionally keeping the aspect ratio."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        """Rescale an image by a scale factor, e.g. 1 / 255."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        keep_aspect_ratio: Optional[bool] = None,
        ensure_multiple_of: Optional[int] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None) -> List[Any]:
        """Convert model logits into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
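# Illustrative usage of the processor above (file name is a placeholder):
# from PIL import Image
# processor = DPTImageProcessor(keep_aspect_ratio=True, ensure_multiple_of=32)
# pixel_values = processor(images=Image.open("img.png"), return_tensors="pt").pixel_values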
| 14 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
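# Illustrative check (assumes the reconstructed names above): with the default
# 24 kHz sampling rate and upsampling ratios [8, 5, 4, 2] (hop length 320),
# EncodecConfig().frame_rate == ceil(24000 / 320) == 75.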
| 137 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(train_args)

        eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(entropy_eval_args)
| 137 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
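# Illustrative usage (checkpoint name taken from the map above):
# tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
# ids = tokenizer("Bonjour le monde").input_ids  # <s> ... </s> around the subword ids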
| 358 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    """Overwrite the expected-value line of one test with `correct_line`."""
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
A__ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
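# Each line of --correct_filename is expected to be ";"-separated into
# file;class;test;correct_line, e.g. (illustrative, not a real entry):
# tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([...])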
| 44 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 229 | '''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
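    # Illustrative check (added example): 25 = 0b11001, 32 = 0b100000,
    # and the two share no set bits, so the AND is all zeros.
    assert binary_and(25, 32) == "0b000000"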
| 229 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 370 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 229 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 229 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
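# Note: with the default type_vocab_size=0, the exported ONNX graph only takes
# input_ids and attention_mask; token_type_ids is dropped both in `inputs` and
# in `generate_dummy_inputs` above.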
| 277 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowercase : str = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 225 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
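# Illustrative usage of the helpers above:
# writeColor("Done!", 32, end="\n")  # prints "Done!" in green (ANSI color code 32)
# move_cursor(2, "UP")               # moves the terminal cursor two lines up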
| 225 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Writes the dataset as JSON to a binary file handle; returns the number of bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
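# Illustrative round trip (file names are placeholders):
# ds = JsonDatasetReader("data.jsonl", split=datasets.Split.TRAIN).read()
# JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()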
| 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
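# Illustrative usage (model name is an assumption; any CLIP-like checkpoint works):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"])
# -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]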
| 340 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()

            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """Constraint enforcing that an exact sequence of token ids appears in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    """A prefix trie over several candidate token-id sequences."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the tokens in `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns True when some candidate sequence is a complete prefix of another
        (i.e. the number of leaves differs from the number of candidate sequences)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint fulfilled once any one of several candidate sequences appears in the output."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    """Tracks a beam's progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.

                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #     inprogress to None. If there are no pending constraints either, then this full list of
                #     constraints is complete.

                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of
            # our list of constraints?

            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".

                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.

                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never touch the self.constraints objects
        # throughout this process, so the fresh copy starts at the initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
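# --- usage sketch (added example): stepping a PhrasalConstraint by hand to show
# the (stepped, completed, reset) protocol that the generation loop relies on.
# The token ids are arbitrary illustrative values.
if __name__ == "__main__":
    constraint = PhrasalConstraint(token_ids=[5, 9, 1])
    assert constraint.advance() == 5  # the next token that makes progress
    assert constraint.update(5) == (True, False, False)
    assert constraint.update(9) == (True, False, False)
    assert constraint.update(1) == (True, True, False)  # phrase fully matched
    assert constraint.remaining() == 0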
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
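# --- usage sketch (added example): the text+image round trip the tests above
# exercise, shown on a public checkpoint. Illustrative; needs network access.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = processor(text=["a photo of a cat"], images=image, return_tensors="np", padding=True)
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']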
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
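# --- usage note (added sketch): with the `_LazyModule` registered above, the
# heavy torch-backed submodules are imported only on first attribute access.
if __name__ == "__main__":
    from transformers.models import clipseg

    # Accessing an attribute triggers the real submodule import, lazily.
    print(clipseg.CLIPSegProcessor.__name__)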
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers (Project Euler problem 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

if __name__ == "__main__":
print(F'{solution() = }')
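# --- sanity check (added sketch): the closed forms used in `solution` must agree
# with a direct brute-force computation.
if __name__ == "__main__":
    for k in (10, 100):
        brute = sum(range(1, k + 1)) ** 2 - sum(i * i for i in range(1, k + 1))
        assert brute == solution(k)
    assert solution(10) == 2640  # the worked example from the problem statement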
"""simple docstring"""
def solution(min_total: int = 10**12) -> int:
    """Returns the number of blue discs in the first arrangement with more than
    `min_total` discs in total for which P(two blue) == 1/2 (Project Euler 100)."""
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2

if __name__ == "__main__":
print(f'''{solution() = }''')
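# --- sanity check (added sketch): `brute_force_blue` is a hypothetical helper,
# not part of the original solution. It confirms the first known arrangements
# (15 blue of 21 discs, 85 blue of 120 discs) against the recurrence above.
from math import isqrt


def brute_force_blue(min_total: int) -> int:
    total = min_total + 1
    while True:
        # P(two blue) == 1/2  <=>  2 * b * (b - 1) == total * (total - 1)
        target = total * (total - 1)
        b = (1 + isqrt(1 + 2 * target)) // 2
        if 2 * b * (b - 1) == target:
            return b
        total += 1


if __name__ == "__main__":
    assert brute_force_blue(4) == solution(4) == 15
    assert brute_force_blue(21) == solution(21) == 85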
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
'''simple docstring'''
import argparse
CUSTOM_JS_PATH = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Updates the stable version and the version table in the docs' custom.js file."""
    with open(CUSTOM_JS_PATH, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_PATH, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
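# --- usage sketch (added example): querying a document image with the tool.
# The input file name is hypothetical, loading the Donut checkpoint needs
# network access, and calling the tool triggers model setup lazily.
if __name__ == "__main__":
    from PIL import Image

    tool = DocumentQuestionAnsweringTool()
    document = Image.open("invoice.png")  # hypothetical local file
    print(tool(document, "What is the invoice number?"))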
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `dummy_super_res_first`
        torch.manual_seed(1)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()


@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
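# --- usage sketch (added example): the same call path as the slow test above,
# runnable on a CUDA machine; the checkpoint download needs network access.
if __name__ == "__main__":
    pipe = UnCLIPImageVariationPipeline.from_pretrained(
        "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
    ).to("cuda")
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
    )
    variation = pipe(init_image).images[0]  # a 256x256 PIL image
    variation.save("cat_variation.png")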
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotates/warps the image with the affine transform that maps `pts1` onto `pts2`."""
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image (the pairings below are one reasonable
    # choice; the original pairings were lost in this copy)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, rotated in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(rotated, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
from ..utils import DummyObject, requires_backends
# Dummy objects raised in place of the real Flax pipelines when the `flax` and
# `transformers` backends are missing. The concrete class names were lost in
# this copy; the names below follow the corresponding diffusers dummy module.
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration for ESM models, optionally carrying the nested ESMFold configs."""

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Each state dim must be a clean multiple of its head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
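# --- usage sketch (added example): constructing a small config and checking the
# nested defaults, mirroring how `EsmConfig` is consumed by the ESM models.
if __name__ == "__main__":
    config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
    assert config.to_dict()["hidden_size"] == 320
    folding = EsmFoldConfig()  # defaults pull in TrunkConfig / StructureModuleConfig
    assert folding.trunk.structure_module.sequence_dim == 384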
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
        if is_tf_available():
            from .modeling_tf_data2vec_vision import (
                TFData2VecVisionForImageClassification,
                TFData2VecVisionForSemanticSegmentation,
                TFData2VecVisionModel,
                TFData2VecVisionPreTrainedModel,
            )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the original checkpoint's weights into the new ProphetNet structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # the old attention projections are fused into one in_proj matrix;
                # slice it into thirds for query/key/value
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
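
# Illustrative sketch (not part of the conversion script above): the core trick in
# the `special_keys` branch is slicing one fused `in_proj` attention matrix into
# separate q/k/v projections. The tensor sizes below are made up for demonstration.
#
#     import torch
#
#     embed_dim = 4
#     in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused [q; k; v] rows
#     q_w = in_proj_weight[:embed_dim, :]
#     k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
#     v_w = in_proj_weight[2 * embed_dim :, :]
#     assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)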
| 30 | 0 |
def solution(n: int = 100) -> int:
    """Count the distinct terms a**b for 2 <= a, b <= n (Project Euler 29)."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
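

# Quick sanity check (illustrative, not in the original file): the Project Euler 29
# statement gives 15 distinct terms for a, b up to 5.
assert solution(5) == 15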
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip()))) | 190 |
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
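

# Illustrative round trip (not in the original file): applying the inverse
# transform to a BWT output must reproduce the input string.
_demo = bwt_transform("banana")
assert reverse_bwt(_demo["bwt_string"], _demo["idx_original_string"]) == "banana"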
if __name__ == "__main__":
A__: Any = '''Provide a string that I will generate its BWT transform: '''
A__: Union[str, Any] = input(entry_msg).strip()
A__: Optional[int] = bwt_transform(s)
print(
F"Burrows Wheeler transform for string '{s}' results "
F"in '{result['bwt_string']}'"
)
A__: Union[str, Any] = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
F"we get original string '{original_string}'"
)
| 276 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
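

# Illustrative usage sketch (not part of the original module): the depth
# attributes are derived from `block_sizes`, e.g.
#     config = FunnelConfig(block_sizes=[4, 4, 4])
#     config.num_hidden_layers  # -> 12 (sum of block_sizes)
#     config.num_blocks         # -> 3  (len of block_sizes)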
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." ) | 171 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
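

# Illustrative sketch (not part of the original script): how the "*" wildcard in
# MAPPING is filled in with the layer index parsed from a fairseq parameter name.
_name = "encoder.layers.3.self_attn.k_proj.weight"
_key = "self_attn.k_proj"
_layer_index = _name.split(_key)[0].split(".")[-2]  # -> "3"
assert MAPPING[_key].replace("*", _layer_index) == "encoder.layers.3.attention.k_proj"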
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowercase : List[str] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 171 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
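

# Worked micro-example (illustrative, not in the original file): the smallest
# hollow square lamina is a 3x3 square with a 1x1 hole, using 3*3 - 1*1 = 8 tiles.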
if __name__ == "__main__":
print(f'{solution() = }')
| 338 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in a list. Returns -1 if the element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = input("Enter numbers separated by comma:\n").strip()
__A = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__A = int(input("Enter the number to be found in the list:\n").strip())
__A = ite_ternary_search(collection, target)
__A = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print("Not found")
| 90 | 0 |
"""simple docstring"""
from math import factorial
def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : int ) -> List[str]:
"""simple docstring"""
if n < k or k < 0:
raise ValueError('Please enter positive integers for n and k where n >= k' )
return factorial(lowerCamelCase__ ) // (factorial(lowerCamelCase__ ) * factorial(n - k ))
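

# Illustrative check (not in the original file): nCk is symmetric in k and n - k.
assert combinations(10, 3) == combinations(10, 7) == 120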
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
| 350 | """simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; run it by setting RUN_SLOW in the environment"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
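

# Illustrative usage sketch (not part of this module): the decorators above gate
# individual tests on the available hardware or integrations, e.g.
#
#     @require_cuda
#     def test_on_gpu(self):
#         ...
#
#     @require_torch_min_version(version="1.12.0")
#     def test_new_torch_feature(self):
#         ...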
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single shared temporary directory for the whole class, wipes its contents at the
    start of each test, and removes it after the suite has finished.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a temporary directory for the class"
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Removes `cls.tmpdir` after the test suite has finished"
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroys all contents in `self.tmpdir`, but not `self.tmpdir` itself"
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and optionally returns its `stdout`, properly capturing any error
    that occurred while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 149 | 0 |
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)
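

# Illustrative check (not in the original file): four successive 90-degree
# rotations compose to the identity.
_m = make_matrix(2)
assert rotate_90(rotate_90(rotate_90(rotate_90(_m)))) == _m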
if __name__ == "__main__":
lowercase_ = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 90 counterclockwise:\n""")
print_matrix(rotate_aa(matrix))
lowercase_ = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 180:\n""")
print_matrix(rotate_aaa(matrix))
lowercase_ = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 270 counterclockwise:\n""")
print_matrix(rotate_aaa(matrix))
| 58 |
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num`, using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
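

# Illustrative check (not in the original file):
assert prime_sieve(10) == [2, 3, 5, 7]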
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 58 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
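

# Illustrative usage sketch (not part of the original module): unset sizes are
# derived from `hidden_size`, e.g.
#     config = RwkvConfig(hidden_size=512)
#     config.attention_hidden_size  # -> 512
#     config.intermediate_size      # -> 2048 (4 * hidden_size)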
| 359 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
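

# Illustrative sketch (not part of the original module): `_pad` keeps
# `global_attention_mask` aligned with `input_ids`, filling padded positions
# with -1 (i.e. "local attention"). For example, with padding_side="right":
#     {"input_ids": [0, 9, 2, 1, 1], "global_attention_mask": [1, 0, 0]}
# becomes
#     {"input_ids": [0, 9, 2, 1, 1], "global_attention_mask": [1, 0, 0, -1, -1]}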
| 286 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 10 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : Union[str, Any] ):
"""simple docstring"""
stooge(snake_case__ , 0 , len(snake_case__ ) - 1 )
return arr
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int ):
"""simple docstring"""
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
_snake_case , _snake_case : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_snake_case : Dict = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(snake_case__ , snake_case__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(snake_case__ , i + t , (snake_case__) )
# Recursively sort first 2/3 elements
stooge(snake_case__ , snake_case__ , (h - t) )
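

# Illustrative check (not in the original file):
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]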
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
| 64 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
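# Illustrative invocation of the converter above (all paths are hypothetical):
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base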
'''simple docstring'''
def solution() -> int:
    """Return a * b * c for the Pythagorean triplet (a, b, c) with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'''{solution() = }''')
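# The unique such triplet is (a, b, c) = (200, 375, 425), so solution()
# evaluates to 200 * 375 * 425 == 31875000.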
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
__snake_case : List[Any] = pl.Trainer.add_argparse_args(parser)
__snake_case : Optional[int] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__snake_case : List[str] = parser.parse_args()
main(args)
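# Illustrative invocation of this fine-tuning script (paths and hyper-parameter
# values below are hypothetical, not taken from the original file):
#   python finetune.py --data_dir ./cnn_dm --model_name_or_path t5-small \
#     --output_dir ./summarization_out --do_train --gpus 1 \
#     --max_source_length 1024 --max_target_length 56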
"""
Even Tree (HackerRank): remove as many edges as possible from a tree so that every
remaining connected component contains an even number of vertices.
"""
from collections import defaultdict


def dfs(start: int) -> int:
    """DFS traversal; returns the size of the subtree rooted at ``start``."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
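# For the sample tree built above, dfs marks the subtrees rooted at 3, 6 and 1 as
# even-sized, so the script prints len(cuts) - 1 == 2 removable edges.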
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
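# Illustrative usage (node values chosen here, not from the original file):
#
#     tree = Node(10)
#     tree.left = Node(5)
#     tree.right = Node(-3)
#     assert next(iter(BinaryTreeNodeSum(tree))) == 12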
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the sum of the maximum-sum contiguous subarray."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
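# For the sample `nums` above the maximal subarray is [4, -1, 2, 1], so the
# printed result is `max_subarray_sum(nums) = 6` (the classic Kadane test case).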
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict) == len(split_dict_yaml_list)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
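# Both qubits start in |0> and are flipped exactly once by the X gates, so the
# expected histogram is {'11': 1000}: every one of the 1000 shots reads |11>.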
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
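# Illustrative sanity check using the defaults above (not part of the original module):
#
#     config = DebertaV2Config()
#     assert config.hidden_size == 1536 and config.model_type == "deberta-v2"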
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
def __init__( self :int , _lowercase :List[Any] , _lowercase :int=13 , _lowercase :List[Any]=30 , _lowercase :Tuple=2 , _lowercase :Optional[Any]=3 , _lowercase :str=True , _lowercase :Dict=True , _lowercase :int=32 , _lowercase :Tuple=5 , _lowercase :Dict=4 , _lowercase :List[Any]=37 , _lowercase :List[Any]="gelu" , _lowercase :List[Any]=0.1 , _lowercase :int=0.1 , _lowercase :str=10 , _lowercase :Optional[int]=0.02 , _lowercase :Any=3 , _lowercase :List[Any]=0.6 , _lowercase :List[str]=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Optional[int] , _lowercase :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = ViTMAEModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self :Dict , _lowercase :Dict , _lowercase :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = ViTMAEForPreTraining(_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(_lowercase )
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTMAEForPreTraining(_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(_lowercase )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = ViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
pass
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowercase )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Optional[int] , _lowercase :Optional[Any] , _lowercase :Any ):
'''simple docstring'''
np.random.seed(2 )
lowercase__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = torch.from_numpy(_lowercase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = pt_noise
super().check_pt_tf_models(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_lowercase )
model.to(_lowercase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_lowercase , _lowercase ) )
lowercase__ = outputs[0].cpu().numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowercase )
lowercase__ = model_class.from_pretrained(_lowercase )
model.to(_lowercase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_lowercase , _lowercase ) )
# Make sure we don't have nans
lowercase__ = after_outputs[0].cpu().numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowercase , 1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
pass
@slow
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTMAEModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def prepare_img():
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
np.random.seed(2 )
lowercase__ = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(_lowercase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_lowercase , return_tensors="pt" ).to(_lowercase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowercase__ = model(**_lowercase , noise=torch.from_numpy(_lowercase ).to(device=_lowercase ) )
# verify the logits
lowercase__ = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , _lowercase )
lowercase__ = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_lowercase ) , atol=1e-4 ) )
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """Checks that the `complete_*` example scripts contain everything found in the `by_feature` scripts."""

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_separate_pass = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ] )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    def generate_dummy_inputs(self, preprocessor, batch_size = -1, seq_length = -1, is_pair = False, framework = None, sampling_rate = 22050, time_duration = 5.0, frequency = 220, ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
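
if __name__ == "__main__":
    # Added usage sketch (not part of the original module; assumes the imports at the
    # top of the original file, such as `PretrainedConfig`, are available as usual).
    config = WhisperConfig(encoder_layers=4)
    # `hidden_size` resolves to `d_model` through `attribute_map` above.
    print(config.hidden_size)  # 256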
| 328 | 1 |
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
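
# Added cross-check (not part of the original snippet): Python's built-in
# three-argument pow() implements the same O(log n) modular exponentiation,
# so both routes to the modular inverse of b must agree.
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)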
| 362 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark):w
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = f' {text}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')

        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 288 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.')
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="http://www.cs.umd.edu/~snover/tercom/", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }), codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"], reference_urls=[
                "https://github.com/jhclark/tercom",
            ], )

    def _compute(self, predictions, references, normalized = False, ignore_punct = False, support_zh_ja_chars = False, case_sensitive = False, ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive, )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
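
if __name__ == "__main__":
    # Added illustration (not part of the original file): the reference transposition
    # performed in _compute above -- sacrebleu expects one stream per reference slot,
    # while this metric accepts one list of references per prediction.
    refs_per_prediction = [["ref A1", "ref A2"], ["ref B1", "ref B2"]]
    streams = [[refs[i] for refs in refs_per_prediction] for i in range(2)]
    assert streams == [["ref A1", "ref B1"], ["ref A2", "ref B2"]]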
| 100 |
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr):
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr):
    result = []
    for i, outer in enumerate(arr):
        next_item = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr):
    arr_size = len(arr)
    stack = []
    result = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
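    # Added cross-check (not part of the original script): all three strategies
    # must agree with the precomputed expectation.
    assert next_greatest_element(arr) == next_greatest_element_fast(arr) == next_greatest_element_slow(arr) == expect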
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 100 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang = "en_XX", tgt_texts = None, tgt_lang = "ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs )

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )

    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )

        return (out_vocab_file,)
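
# Added usage sketch (not part of the original file; it needs the remote checkpoint
# named above, so it is left as comments):
#
#     tokenizer = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # input_ids end with [..., eos_token_id, en_XX code], per set_src_lang_special_tokens above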
| 24 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
| 24 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1" )
        elif len(value.shape ) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" " ) + 1]
            self.print_len += len(printable_text )

        self.on_finalized_text(printable_text )

    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True )

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None )

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False
class TextIteratorStreamer(TextStreamer):
    """
    Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout )

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
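
# Added usage sketch (not part of the original file; `model`, `tokenizer` and
# `inputs` are hypothetical stand-ins for a loaded causal LM and its inputs):
#
#     from threading import Thread
#
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer))
#     thread.start()
#     for new_text in streamer:
#         print(new_text, end="")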
| 226 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
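    # Added sanity check (not part of the original script): a generated kernel is
    # square and strictly positive; note this formulation is not normalised to sum to 1.
    kernel = gen_gaussian_kernel(3, sigma=1)
    assert kernel.shape == (3, 3) and (kernel > 0).all()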
| 226 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def make_choice_type_function(choices) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(*,
    aliases = None, help = None, default = dataclasses.MISSING, default_factory = dataclasses.MISSING, metadata = None, **kwargs, ) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs )
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default")
        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f""" Problem encountered in field '{field.name}'.""")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"""--no_{field.name}""", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)")
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`.") from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None, ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}""")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
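
if __name__ == "__main__":
    # Added usage sketch (hypothetical dataclass, not part of the original file):
    # parse_dict is exercised because it needs no command line.
    @dataclasses.dataclass
    class ExampleArguments:
        learning_rate: float = 3e-4
        use_fp16: bool = False

    parser = HfArgumentParser(ExampleArguments)
    (example_args,) = parser.parse_dict({"learning_rate": 1e-4, "use_fp16": True})
    assert example_args.learning_rate == 1e-4 and example_args.use_fp16 is True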
| 273 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
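
# Added behaviour note (not part of the original file): with `torch` or `torchsde`
# missing, any use of the placeholder raises an ImportError that names the required
# backends, e.g.:
#
#     DPMSolverSDEScheduler()  # ImportError: ... requires the torch and torchsde libraries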
| 273 | 1 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ], )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
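
if __name__ == "__main__":
    # Added worked example (hypothetical counts, not part of the original file):
    # with S=2 substitutions, D=1 deletion, I=1 insertion and C=6 hits,
    # WER = (S + D + I) / (S + D + C) = 4 / 9.
    assert abs((2 + 1 + 1) / (2 + 1 + 6) - 4 / 9) < 1e-12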
| 115 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("""could not find root in given interval.""" )
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid ) > 1_0**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
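def check_against_scipy() -> None:
    # Optional cross-check (illustrative addition, assumes SciPy is installed):
    # SciPy's reference bisect should agree with `bisection` above on the
    # classic root of x**3 - 2x - 5, which is approximately 2.0945514815.
    from scipy.optimize import bisect

    assert abs(bisect(f, 1, 1000) - bisection(f, 1, 1000)) < 1e-6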
| 115 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """
    Construct a Reformer tokenizer, backed by a SentencePiece model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Any:
        state = self.__dict__.copy()
        # the SentencePiece processor cannot be pickled; it is rebuilt in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
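# Usage sketch (requires network access for the SentencePiece model; this
# module uses package-relative imports, so go through the transformers API):
#
#     from transformers import ReformerTokenizer
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer.encode("A sentence to round-trip.")
#     print(tokenizer.convert_ids_to_tokens(ids))
#     print(tokenizer.decode(ids))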
| 217 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self) -> CanineTokenizer:
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weather?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = AddedToken(chr(NEW_TOKEN), lstrip=True)

                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2, tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
    # The remaining overrides skip common tests that do not apply to CANINE,
    # which works directly on Unicode code points and has no learned vocabulary.

    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
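if __name__ == "__main__":
    # Quick illustration of CANINE's character-level scheme: input ids are raw
    # Unicode code points, bracketed by [CLS] = 0xE000 (57344) and
    # [SEP] = 0xE001 (57345) from the private-use area, so no vocab file is
    # needed at all.
    tokenizer = CanineTokenizer()
    print(tokenizer("hi").input_ids)  # [57344, 104, 105, 57345]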
| 217 | 1 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 25 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
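# Worked example for `normalize_box` above (illustrative numbers): a word box
# [left=100, top=200, right=150, bottom=220] detected on a 500x1000 pixel page
# maps into the 0-1000 coordinate space LayoutLM-style models expect:
#
#     normalize_box([100, 200, 150, 220], width=500, height=1000)
#     # -> [200, 200, 300, 220]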
| 25 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations executed in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
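# Minimal demonstration of the Tracker/ModuleTransfer mechanism above on toy
# modules (illustrative addition; runs on CPU with no checkpoints). Both
# modules execute the same leaf ops in the same order, so the weights can be
# copied positionally.
def demo_module_transfer() -> None:
    src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 8, 8))
    assert torch.equal(src[0].weight, dest[0].weight)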
| 357 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
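def test_swap() -> None:
    # Quick self-check for `swap_nodes` (illustrative addition): swapping two
    # present values exchanges their data; a missing value is a no-op. Note the
    # design choice above: data fields are swapped, so no re-linking is needed.
    linked_list = LinkedList()
    for value in range(3, 0, -1):
        linked_list.push(value)  # builds 1 -> 2 -> 3
    linked_list.swap_nodes(1, 3)
    assert linked_list.head.data == 3
    linked_list.swap_nodes(1, 99)  # 99 is absent, so the list is unchanged
    assert linked_list.head.data == 3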
| 41 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """
        Empty init weights function to ensure compatibility of the class in the library.
        """
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
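# Hypothetical usage sketch (assumes `timm` is installed; this module lives
# inside the transformers package, so import the public symbols from there):
#
#     import torch
#     from transformers import TimmBackbone, TimmBackboneConfig
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#     print([tuple(f.shape) for f in feature_maps])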
| 88 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 88 | 1 |
from ..utils import DummyObject, requires_backends
# NOTE: the concrete public class names in this dummy-objects module were lost
# during extraction; the FlaxDummyObject* names below are placeholders, not the
# real diffusers API. The DummyObject metaclass / `_backends` pattern and the
# `from_config` / `from_pretrained` classmethods are the standard ones.
class FlaxDummyObject1(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject2(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject3(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject4(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject5(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject6(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject7(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject8(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject9(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject10(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject11(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject12(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject13(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 42 |
import json
import os
import torch
from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
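# Caveat worth making explicit (illustrative, not in the original script): the
# positional `zip` used to build `mapping` assumes both state dicts enumerate
# parameters in the same order. A shape check per pair makes a silently wrong
# mapping fail fast, e.g.:
#
#     for (k_src, v_src), (k_dst, v_dst) in zip(src_sd.items(), dst_sd.items()):
#         assert v_src.shape == v_dst.shape, (k_src, k_dst)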
| 42 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
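# Worked example of the `feed_forward_proj` parsing in T5Config above:
#
#     "relu"       -> is_gated_act=False, dense_act_fn="relu"
#     "gated-gelu" -> is_gated_act=True,  dense_act_fn="gelu_new" (backward-compat remap)
#     "gated-silu" -> is_gated_act=True,  dense_act_fn="silu"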
| 41 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21_128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 314 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """
    Quantize a model with bitsandbytes, load pretrained weights and dispatch the model across the available devices.
    """
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict , A : Union[str, Any]=None , A : Optional[Any]=None , A : Optional[int]=None ):
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
UpperCAmelCase = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
UpperCAmelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCAmelCase = {}
UpperCAmelCase = special_dtypes
UpperCAmelCase = no_split_module_classes
UpperCAmelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCAmelCase = get_balanced_memory(
A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , )
UpperCAmelCase = max_memory
UpperCAmelCase = infer_auto_device_map(A , **A )
if isinstance(A , A ):
# check that no quantized module is dispatched to the cpu or the disk
UpperCAmelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCAmelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
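# Hedged sketch of the device-map flow above in isolation: balance memory across
# devices, then infer a device map for an empty-weights model. The "gpt2" config
# and the int8 dtype are illustrative choices, not taken from this file.
import torch
from accelerate import infer_auto_device_map, init_empty_weights
from accelerate.utils import get_balanced_memory
from transformers import AutoConfig, AutoModelForCausalLM

with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))
balanced_memory = get_balanced_memory(empty_model, dtype=torch.int8)
device_map = infer_auto_device_map(empty_model, max_memory=balanced_memory, dtype=torch.int8)
print(device_map)  # e.g. {"transformer.h.0": 0, ...}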
def lowerCamelCase__ ( A : str , A : Union[str, Any] , A : str=None , A : Tuple=None ):
'''simple docstring'''
if modules_to_not_convert is None:
UpperCAmelCase = []
UpperCAmelCase , UpperCAmelCase = _replace_with_bnb_layers(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' This can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def lowerCamelCase__ ( A : Optional[int] , A : List[str] , A : List[Any]=None , A : Tuple=None , ):
'''simple docstring'''
UpperCAmelCase = False
for name, module in model.named_children():
if current_key_name is None:
UpperCAmelCase = []
current_key_name.append(A )
if isinstance(A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCAmelCase = '''.'''.join(A )
UpperCAmelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCAmelCase = False
break
if proceed:
# Load bnb module with empty weight and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCAmelCase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCAmelCase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
UpperCAmelCase = module.weight.data
if module.bias is not None:
UpperCAmelCase = module.bias.data
bnb_module.requires_grad_(A )
setattr(A , A , A )
UpperCAmelCase = True
if len(list(module.children() ) ) > 0:
UpperCAmelCase , UpperCAmelCase = _replace_with_bnb_layers(
A , A , A , A )
UpperCAmelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
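# De-obfuscated sketch of the recursive replacement above: the collapsed
# `load_in_abit` / `Linearabit` names hide the 8-bit vs 4-bit distinction, so this
# sketch restores it. Function and config attribute names are illustrative
# (upstream this is accelerate's _replace_with_bnb_layers).
import bitsandbytes as bnb
import torch.nn as nn

def swap_linear_with_bnb(model, config, modules_to_not_convert, prefix=""):
    replaced = False
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        skip = any(key == full_name or f"{key}." in f"{full_name}." for key in modules_to_not_convert)
        if isinstance(child, nn.Linear) and not skip:
            if config.load_in_8bit:
                new_layer = bnb.nn.Linear8bitLt(
                    child.in_features, child.out_features, child.bias is not None,
                    has_fp16_weights=False, threshold=config.llm_int8_threshold,
                )
            else:  # 4-bit path
                new_layer = bnb.nn.Linear4bit(
                    child.in_features, child.out_features, child.bias is not None,
                    config.bnb_4bit_compute_dtype,
                    compress_statistics=config.bnb_4bit_use_double_quant,
                    quant_type=config.bnb_4bit_quant_type,
                )
            new_layer.weight.data = child.weight.data
            if child.bias is not None:
                new_layer.bias.data = child.bias.data
            new_layer.requires_grad_(False)
            setattr(model, name, new_layer)
            replaced = True
        if list(child.children()):
            _, child_replaced = swap_linear_with_bnb(child, config, modules_to_not_convert, full_name)
            replaced = replaced or child_replaced
    return model, replaced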
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
with init_empty_weights():
UpperCAmelCase = deepcopy(A ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
UpperCAmelCase = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A , A ):
UpperCAmelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCAmelCase = sum(A , [] )
UpperCAmelCase = len(A ) > 0
# Check if it is a base model
UpperCAmelCase = False
if hasattr(A , '''base_model_prefix''' ):
UpperCAmelCase = not hasattr(A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCAmelCase = list(model.named_children() )
UpperCAmelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCAmelCase = set(A ) - set(A )
UpperCAmelCase = list(set(A ) ) + list(A )
# remove ".weight" from the keys
UpperCAmelCase = ['''.weight''', '''.bias''']
UpperCAmelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCAmelCase = name.replace(A , '''''' )
filtered_module_names.append(A )
return filtered_module_names
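# Quick illustration of the tied-weights helper above (named get_keys_to_not_convert
# upstream): for a causal LM whose input/output embeddings are tied, it typically
# returns the output head. The "gpt2" checkpoint and the expected output are hedged.
from transformers import AutoModelForCausalLM

demo_model = AutoModelForCausalLM.from_pretrained("gpt2")
print(get_keys_to_not_convert(demo_model))  # expected to include e.g. "lm_head"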
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
for m in model.modules():
if isinstance(A , bnb.nn.Linearabit ):
return True
return False
def lowerCamelCase__ ( A : nn.Module ):
'''simple docstring'''
return next(parameter.parameters() ).device
def lowerCamelCase__ ( A : str , A : Tuple , A : Tuple , A : Optional[Any] , A : int , A : Optional[int] , A : List[Any] ):
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(A , A , 0 , dtype=A , value=A )
UpperCAmelCase = param_name
UpperCAmelCase = model
if "." in tensor_name:
UpperCAmelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCAmelCase = getattr(A , A )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
UpperCAmelCase = new_module
UpperCAmelCase = splits[-1]
# offload weights
UpperCAmelCase = False
offload_weight(module._parameters[tensor_name] , A , A , index=A )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , )
else:
offload_weight(A , A , A , index=A )
offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A )
set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) )
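# Hedged end-to-end usage of the utilities in this file via accelerate's
# load_and_quantize_model entry point. The checkpoint folder path is a
# placeholder; the model class and 8-bit settings are illustrative.
import torch
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

with init_empty_weights():
    skeleton = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))

quantized_model = load_and_quantize_model(
    skeleton,
    bnb_quantization_config=BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0),
    weights_location="path/to/checkpoint_folder",  # placeholder folder with the weights
    device_map="auto",
)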
| 91 |
'''simple docstring'''
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
while a != 0:
UpperCAmelCase , UpperCAmelCase = b % a, a
return b
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
if gcd(A , A ) != 1:
UpperCAmelCase = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(A )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1, 0, a
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0, 1, m
while va != 0:
UpperCAmelCase = ua // va
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
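# Placeholder renaming above collapses the three extended-Euclid accumulators
# (u1/u2/u3 and v1/v2/v3) into single names; a runnable sketch with distinct names:
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b

def mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m

assert mod_inverse(3, 11) == 4  # since 3 * 4 = 12 ≡ 1 (mod 11)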
| 91 | 1 |
def A ( a_ ,a_ ) -> int:
return number | (1 << position)
def A ( a_ ,a_ ) -> int:
return number & ~(1 << position)
def A ( a_ ,a_ ) -> int:
return number ^ (1 << position)
def A ( a_ ,a_ ) -> bool:
return ((number >> position) & 1) == 1
def A ( a_ ,a_ ) -> int:
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
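# Placeholder renaming has collapsed the five function and parameter names above
# (all `def A ( a_ ,a_ )`); a runnable de-obfuscated sketch with a usage check:
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)

def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)

def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)

def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1

def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)

assert set_bit(0b1010, 0) == 0b1011    # turn on the lowest bit
assert clear_bit(0b1010, 1) == 0b1000  # turn off bit 1
assert flip_bit(0b1010, 3) == 0b0010   # toggle the high bit
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 2) == 0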
| 71 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __A :
"""simple docstring"""
UpperCamelCase__ : int =XGLMConfig
UpperCamelCase__ : Optional[Any] ={}
UpperCamelCase__ : List[str] ="""gelu"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ):
"""simple docstring"""
__UpperCamelCase : Tuple =parent
__UpperCamelCase : List[str] =batch_size
__UpperCamelCase : str =seq_length
__UpperCamelCase : Dict =is_training
__UpperCamelCase : Tuple =use_input_mask
__UpperCamelCase : List[Any] =use_labels
__UpperCamelCase : Any =vocab_size
__UpperCamelCase : List[Any] =d_model
__UpperCamelCase : Optional[int] =num_hidden_layers
__UpperCamelCase : List[str] =num_attention_heads
__UpperCamelCase : Optional[int] =ffn_dim
__UpperCamelCase : str =activation_function
__UpperCamelCase : Any =activation_dropout
__UpperCamelCase : Optional[int] =attention_dropout
__UpperCamelCase : Optional[int] =max_position_embeddings
__UpperCamelCase : Any =initializer_range
__UpperCamelCase : Dict =None
__UpperCamelCase : Optional[int] =0
__UpperCamelCase : Optional[Any] =2
__UpperCamelCase : str =1
def __lowercase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__UpperCamelCase : Union[str, Any] =None
if self.use_input_mask:
__UpperCamelCase : Dict =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : Any =self.get_config()
__UpperCamelCase : Optional[Any] =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __lowercase ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCamelCase__ , )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase : Optional[Any] ={
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __A ( a , a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase__ : str =(TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase__ : Optional[Any] =(
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : Optional[Any] =False
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =TFXGLMModelTester(self )
__UpperCamelCase : Dict =ConfigTester(self , config_class=lowerCamelCase__ , n_embd=37 )
def __lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Optional[Any] =TFXGLMModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __lowercase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self , lowerCamelCase__=True ):
"""simple docstring"""
__UpperCamelCase : int =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : List[str] =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase : str =[2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase : Optional[Any] =model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : Union[str, Any] =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase : str =tokenizer('Today is a nice day and' , return_tensors='tf' )
__UpperCamelCase : Union[str, Any] =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase : Any =model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ , seed=[7, 0] )
__UpperCamelCase : Tuple =tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : List[Any] =(
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : Optional[Any] =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : Optional[Any] ='left'
# use different length sentences to test batching
__UpperCamelCase : Optional[int] =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase : List[Any] =tokenizer(lowerCamelCase__ , return_tensors='tf' , padding=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =inputs['input_ids']
__UpperCamelCase : Dict =model.generate(input_ids=lowerCamelCase__ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
__UpperCamelCase : List[Any] =tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__UpperCamelCase : Dict =model.generate(input_ids=lowerCamelCase__ , max_new_tokens=12 )
__UpperCamelCase : Any =tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__UpperCamelCase : Optional[Any] =model.generate(input_ids=lowerCamelCase__ , max_new_tokens=12 )
__UpperCamelCase : Optional[int] =tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : int =tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Any =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , [non_padded_sentence, padded_sentence] )
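# Minimal sketch of the left-padded batched generation exercised by the test
# above. The model name matches the tests; the generated text is not guaranteed.
from transformers import TFXGLMForCausalLM, XGLMTokenizer

model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"  # pad on the left so generation continues from real tokens

batch = tokenizer(["A much longer first prompt than the second one", "Hi"], return_tensors="tf", padding=True)
output_ids = model.generate(
    input_ids=batch["input_ids"],
    attention_mask=batch["attention_mask"],
    max_new_tokens=12,
)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))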
| 71 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = "naver-clova-ix/donut-base-finetuned-docvqa"
lowerCamelCase__: Union[str, Any] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
lowerCamelCase__: int = "document_qa"
lowerCamelCase__: List[str] = AutoProcessor
lowerCamelCase__: List[str] = VisionEncoderDecoderModel
lowerCamelCase__: str = ["image", "text"]
lowerCamelCase__: Union[str, Any] = ["text"]
def __init__( self: str , *__lowerCamelCase: List[Any] , **__lowerCamelCase: Optional[int] ) -> List[str]:
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: "Image" , __lowerCamelCase: str ) -> int:
__UpperCAmelCase : Optional[int] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
__UpperCAmelCase : Union[str, Any] = task_prompt.replace("{user_input}" , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.pre_processor.tokenizer(
__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors="pt" ).input_ids
__UpperCAmelCase : Union[str, Any] = self.pre_processor(__lowerCamelCase , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _lowerCamelCase ( self: Any , __lowerCamelCase: List[str] ) -> Optional[Any]:
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowerCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowerCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowerCamelCase , ).sequences
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Any ) -> Union[str, Any]:
__UpperCAmelCase : str = self.pre_processor.batch_decode(__lowerCamelCase )[0]
__UpperCAmelCase : Tuple = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
__UpperCAmelCase : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
__UpperCAmelCase : Optional[Any] = re.sub(R"<.*?>" , "" , __lowerCamelCase , count=1 ).strip() # remove first task start token
__UpperCAmelCase : Union[str, Any] = self.pre_processor.tokenajson(__lowerCamelCase )
return sequence["answer"]
| 369 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(snake_case__ )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
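# Hedged follow-up: loading a converted checkpoint from the hub for inference.
# The repo id matches the push_to_hub target above; the image path is a placeholder.
import torch
from PIL import Image
from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

processor = SegformerImageProcessor()
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

image = Image.open("scene.jpg").convert("RGB")  # placeholder image
pixel_values = processor(image, return_tensors="pt").pixel_values
with torch.no_grad():
    logits = model(pixel_values).logits  # (batch, num_labels, height, width)
segmentation = logits.argmax(dim=1)[0]  # per-pixel ADE20K class ids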
| 342 | 0 |