| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-54.1k | int64 0-699 | stringlengths 111-35.6k | int64 0-699 | int64 0-1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
    BaseOutput,
    is_accelerate_available,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from PIL import Image
        >>> import torch
        >>> from diffusers import DiffusionPipeline
        >>> from diffusers.utils import export_to_gif, load_image

        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        >>> repo = "openai/shap-e-img2img"
        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
        >>> pipe = pipe.to(device)

        >>> guidance_scale = 3.0
        >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
        >>> image = load_image(image_url).convert("RGB")

        >>> images = pipe(
        ...     image,
        ...     guidance_scale=guidance_scale,
        ...     num_inference_steps=64,
        ...     frame_size=256,
        ... ).images

        >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
        ```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier-free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes.
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier-free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(noise_pred, timestep=t, sample=latents).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128
            )
            images.append(image)

        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
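
# Minimal usage sketch for the collator above. Assumptions flagged here, not taken
# from the original module: the LUKE checkpoint name and the exact feature fields.
#
#   from transformers import LukeTokenizer
#
#   tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   # each feature dict carries "input_ids", "entity_ids", "ner_tags",
#   # "original_entity_spans" and a "labels" sequence
#   batch = collator.torch_call(features)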
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"
    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
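
# Minimal usage sketch (the argument values are illustrative assumptions, not the
# defaults of any released checkpoint):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch'})])
#   print(onnx_config.atol_for_validation)  # 0.0001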
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset: Optional[Dataset] = None, eval_examples=None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", **gen_kwargs) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
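
# Minimal construction sketch (assumption: `model`, `training_args`, the datasets and
# the two callables are placeholders the calling script must define; they do not
# exist in this module):
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)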
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale,
                class_labels=class_labels, timestep_cond=timestep_cond, attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs, guess_mode=guess_mode, return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
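
# Minimal usage sketch (assumption: the two checkpoint names and the local directory
# are illustrative; any pair of compatible ControlNet checkpoints works):
#
#   canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi = MultiControlNetModel([canny, pose])
#   multi.save_pretrained("./multi_controlnet")  # writes ./multi_controlnet and ./multi_controlnet_1
#   reloaded = MultiControlNetModel.from_pretrained("./multi_controlnet")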
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
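
# Invocation sketch (assumption: the script file name and both paths are
# placeholders, not values from the original repository):
#
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir ./gptsan_tf_checkpoint \
#       --output ./gptsan_pytorch_model.pt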
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6,
        )

        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    """Return True when every element of the list occurs exactly once."""
    return len(set(input_list)) == len(input_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
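
    # Usage sketch (illustrative values, not part of the original file):
    print(all_unique([1, 2, 3, 4]))     # True
    print(all_unique(["a", "b", "a"]))  # False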
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
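
# Behavior sketch (assumption: illustrative of how `_LazyModule` defers heavy imports;
# not code from the original module):
#
#   from transformers.models.convbert import ConvBertConfig  # resolves only the config submodule
#   from transformers.models.convbert import ConvBertModel   # first access triggers the torch-backed import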
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem for a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
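
    # Usage sketch (illustrative; a 5x5 board has open tours, though the naive
    # backtracking above may take a few seconds to find one):
    for row in open_knight_tour(5):
        print(row)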
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings if they differ in exactly one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Reduce the list of binary minterms to the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to its fixed-width binary representation."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether a prime implicant covers a minterm (they differ in `count` positions)."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the prime implicant chart."""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the prime implicant chart (rows: implicants, columns: minterms)."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from __future__ import annotations
import math
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
return min(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
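# NOTE (worked example, not in the original file): for the 8 leaf scores used
# in main() below, height = log2(8) = 3 and the value propagates bottom-up as
# max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
# = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65.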
def _a ( ):
"""simple docstring"""
lowercase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
lowercase__ = math.log(len(SCREAMING_SNAKE_CASE ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 43 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _a ( UpperCamelCase__ ):
_lowercase : Union[str, Any] = '''informer'''
_lowercase : int = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: Dict , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "student_t" , UpperCamelCase_: str = "nll" , UpperCamelCase_: int = 1 , UpperCamelCase_: List[int] = None , UpperCamelCase_: Optional[Union[str, bool]] = "mean" , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: int = 64 , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: bool = True , UpperCamelCase_: str = "gelu" , UpperCamelCase_: float = 0.05 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: int = 100 , UpperCamelCase_: float = 0.02 , UpperCamelCase_: Tuple=True , UpperCamelCase_: str = "prob" , UpperCamelCase_: int = 5 , UpperCamelCase_: bool = True , **UpperCamelCase_: Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = prediction_length
lowercase__ = context_length or prediction_length
lowercase__ = distribution_output
lowercase__ = loss
lowercase__ = input_size
lowercase__ = num_time_features
lowercase__ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowercase__ = scaling
lowercase__ = num_dynamic_real_features
lowercase__ = num_static_real_features
lowercase__ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
lowercase__ = cardinality
else:
lowercase__ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
lowercase__ = embedding_dimension
else:
lowercase__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ = num_parallel_samples
# Transformer architecture configuration
lowercase__ = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ = d_model
lowercase__ = encoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = encoder_ffn_dim
lowercase__ = decoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = decoder_layers
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = use_cache
# Informer
lowercase__ = attention_type
lowercase__ = sampling_factor
lowercase__ = distil
super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
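# NOTE (hypothetical usage sketch, not in the original file; `_a` is the
# obfuscated class name used throughout this dump, and the keyword names below
# refer to the pre-obfuscation parameters):
# config = _a(prediction_length=24, num_static_categorical_features=1, cardinality=[10])
# assert config.context_length == 24 and config.embedding_dimension == [5]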
| 43 |
class _a :
def __init__( self: Tuple , UpperCamelCase_: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = val
lowercase__ = None
lowercase__ = None
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
if self.val:
if val < self.val:
if self.left is None:
lowercase__ = Node(UpperCamelCase_ )
else:
self.left.insert(UpperCamelCase_ )
elif val > self.val:
if self.right is None:
lowercase__ = Node(UpperCamelCase_ )
else:
self.right.insert(UpperCamelCase_ )
else:
lowercase__ = val
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if root:
inorder(root.left , SCREAMING_SNAKE_CASE )
res.append(root.val )
inorder(root.right , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) == 0:
return arr
lowercase__ = Node(arr[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowercase__ = []
inorder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return res
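# NOTE (illustrative, not in the original file): the BST is unbalanced, so
# tree_sort is O(n log n) on average but degrades to O(n^2) on already-sorted
# input. Example: tree_sort([10, 1, 3, 2, 9, 14, 13]) -> [1, 2, 3, 9, 10, 13, 14].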
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 43 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
lowercase__ = re.match(R'''^mobilenet_v1_([^_]*)_([^_]*)$''' , SCREAMING_SNAKE_CASE )
if matches:
lowercase__ = float(matches[1] )
lowercase__ = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
lowercase__ = 10_01
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = '''huggingface/label-files'''
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()}
lowercase__ = '''background'''
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
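# NOTE (illustrative, not in the original file): the regex above splits a
# checkpoint name into depth multiplier and input resolution, e.g.
# re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$', 'mobilenet_v1_0.75_192').groups()
# returns ('0.75', '192').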
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
lowercase__ = get_mobilenet_va_config(SCREAMING_SNAKE_CASE )
# Load 🤗 model
lowercase__ = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
lowercase__ = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowercase__ = model(**SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
lowercase__ = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
elif model_name == "mobilenet_v1_0.75_192":
lowercase__ = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
else:
lowercase__ = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print('''Pushing to the hub...''' )
lowercase__ = '''google/''' + model_name
image_processor.push_to_hub(SCREAMING_SNAKE_CASE )
model.push_to_hub(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
    help='Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase = parser.parse_args()
convert_mobilenet_va_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 43 |
lowerCAmelCase = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
lowerCAmelCase = {value: key for key, value in encode_dict.items()}
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
lowercase__ = ''''''
for word in coded.split():
while len(word ) != 0:
decoded += decode_dict[word[:5]]
lowercase__ = word[5:]
decoded += " "
return decoded.strip()
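# NOTE (round-trip example, not in the original file): the table above is a
# bijection (though it deviates from the classical Baconian codes for 'j' and
# 'v'), so decoding undoes encoding:
# 'hello' -> 'AABBBAABAAABABAABABAABBAB' -> 'hello'.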
if __name__ == "__main__":
from doctest import testmod
testmod()
| 43 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowerCAmelCase = None
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = '▁'
lowerCAmelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
lowerCAmelCase = {
'google/pegasus-xsum': 512,
}
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = VOCAB_FILES_NAMES
_lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[int] = PegasusTokenizer
_lowercase : Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , UpperCamelCase_: int=None , UpperCamelCase_: Dict=None , UpperCamelCase_: Tuple="<pad>" , UpperCamelCase_: Dict="</s>" , UpperCamelCase_: Tuple="<unk>" , UpperCamelCase_: Optional[int]="<mask_2>" , UpperCamelCase_: Dict="<mask_1>" , UpperCamelCase_: Any=None , UpperCamelCase_: int=103 , **UpperCamelCase_: List[str] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError(
f'additional_special_tokens should be of type {type(UpperCamelCase_ )}, but is'
f' {type(UpperCamelCase_ )}' )
lowercase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(UpperCamelCase_ ) , self.offset - 1 )
]
if len(set(UpperCamelCase_ ) ) != len(UpperCamelCase_ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
lowercase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase_ ( self: Any , UpperCamelCase_: List , UpperCamelCase_: Optional[List] = None , UpperCamelCase_: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase_ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
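# NOTE (illustrative, not in the original file): there is no BOS/CLS here --
# a single sequence (and, for API consistency, a concatenated pair) simply has
# the EOS id appended, e.g. [5, 6] -> [5, 6, self.eos_token_id].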
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
| 43 |
import numpy as np
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
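# NOTE (illustrative, not in the original file; `_a` is the obfuscated function
# name): the logistic function maps 0 to 0.5 and saturates toward 0/1 in the
# tails, e.g. _a(np.array([-6.0, 0.0, 6.0])) ~= array([0.00247, 0.5, 0.99753]).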
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ = FlaxAutoModelForSeqaSeqLM.from_config(config=SCREAMING_SNAKE_CASE )
lowercase__ = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE )
lowercase__ = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
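# NOTE (explanatory comment, added): T5 v1.1 and LongT5 checkpoints use a gated
# activation, so the MLP input projection is split into wi_0/wi_1; original T5
# v1.0 checkpoints carry a single wi kernel, which this flag distinguishes.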
if config.model_type == "t5":
lowercase__ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global\'].''' )
# Encoder
for layer_index in range(config.num_layers ):
lowercase__ = f'layers_{str(layer_index )}'
# Self-Attention
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowercase__ = flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer''']
lowercase__ = tax_attention_key
lowercase__ = tax_attention_out
lowercase__ = tax_attention_query
lowercase__ = tax_attention_value
lowercase__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ = tax_global_layer_norm
if split_mlp_wi:
lowercase__ = tax_mlp_wi_a
lowercase__ = tax_mlp_wi_a
else:
lowercase__ = tax_mlp_wi
lowercase__ = tax_mlp_wo
lowercase__ = tax_mlp_layer_norm
lowercase__ = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
lowercase__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
lowercase__ = tax_encoder_global_rel_embedding
# Assigning
lowercase__ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
lowercase__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowercase__ = f'layers_{str(layer_index )}'
# Self-Attention
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
lowercase__ = tax_enc_dec_attention_module['''key''']['''kernel''']
lowercase__ = tax_enc_dec_attention_module['''out''']['''kernel''']
lowercase__ = tax_enc_dec_attention_module['''query''']['''kernel''']
lowercase__ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowercase__ = flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer''']
lowercase__ = tax_attention_key
lowercase__ = tax_attention_out
lowercase__ = tax_attention_query
lowercase__ = tax_attention_value
lowercase__ = tax_pre_attention_layer_norm
lowercase__ = tax_enc_dec_attention_key
lowercase__ = tax_enc_dec_attention_out
lowercase__ = tax_enc_dec_attention_query
lowercase__ = tax_enc_dec_attention_value
lowercase__ = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ = tax_mlp_wi_a
lowercase__ = tax_mlp_wi_a
else:
lowercase__ = tax_mlp_wi
lowercase__ = tax_mlp_wo
lowercase__ = tax_mlp_layer_norm
lowercase__ = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
lowercase__ = tax_decoder_norm
# Only for layer 0:
lowercase__ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
lowercase__ = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ = tax_model['''target''']['''token_embedder''']['''embedding''']
lowercase__ = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(SCREAMING_SNAKE_CASE )
print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
lowerCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 43 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = '▁'
lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
lowerCAmelCase = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
lowerCAmelCase = {'vinai/bartpho-syllable': 1024}
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = monolingual_vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__ = {}
lowercase__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = cnt
cnt += 1
with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowercase__ = line.strip().split()[0]
lowercase__ = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = len(self.fairseq_tokens_to_ids )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
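# NOTE (illustrative, not in the original file): this mirrors the RoBERTa/BART
# pair format -- single sequence: <s> A </s>; pair: <s> A </s></s> B </s>.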
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
lowercase__ = ''''''.join(UpperCamelCase_ ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(UpperCamelCase_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
| 43 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
lowerCAmelCase = {'mobilebert-uncased': 512}
lowerCAmelCase = {}
class _a ( UpperCamelCase__ ):
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Tuple = PRETRAINED_INIT_CONFIGURATION
_lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[Any] = MobileBertTokenizer
def __init__( self: int , UpperCamelCase_: Any=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]="[UNK]" , UpperCamelCase_: List[str]="[SEP]" , UpperCamelCase_: Optional[Any]="[PAD]" , UpperCamelCase_: List[Any]="[CLS]" , UpperCamelCase_: int="[MASK]" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
lowercase__ = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**UpperCamelCase_ )
lowercase__ = do_lower_case
def lowerCamelCase_ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: Any=None ) -> int:
"""simple docstring"""
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
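# NOTE (illustrative, not in the original file): standard BERT packing --
# [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.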
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
| 43 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = original_name.split('''.''' )[0]
lowercase__ = key.split('''.''' )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] )
lowercase__ = orig_block_num - offset
lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
return key
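# NOTE (hypothetical key, not in the original file): with offset=1,
# original_name='mlp.fc1' and new_name='output.conv1', a key such as
# 'poolformer.encoder.2.3.mlp.fc1.weight' is renumbered to
# 'poolformer.encoder.block.1.3.output.conv1.weight'.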
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = OrderedDict()
lowercase__ , lowercase__ = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
lowercase__ = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
lowercase__ = key[: key.find('''proj''' )]
lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' )
lowercase__ = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
lowercase__ = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' )
if "norm2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
lowercase__ = key.replace('''head''' , '''classifier''' )
lowercase__ = value
return new_state_dict
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = PoolFormerConfig()
# set attributes based on model_name
lowercase__ = '''huggingface/label-files'''
lowercase__ = model_name[-3:]
lowercase__ = 10_00
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = (1, 10_00)
# set config attributes
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(k ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
if size == "s12":
lowercase__ = [2, 2, 6, 2]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s24":
lowercase__ = [4, 4, 12, 4]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.9
elif size == "m36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
elif size == "m48":
lowercase__ = [8, 8, 24, 8]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
else:
raise ValueError(f'Size {size} not supported' )
# load image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
# Prepare image
lowercase__ = prepare_img()
lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) )
# rename keys
lowercase__ = rename_keys(SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
lowercase__ = model(SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 43 | 1 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _a ( UpperCamelCase__ , UpperCamelCase__ ):
_lowercase : int = '''pixel_values'''
_lowercase : Optional[int] = False
_lowercase : Optional[Any] = TimmBackboneConfig
def __init__( self: Union[str, Any] , UpperCamelCase_: Tuple , **UpperCamelCase_: Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , '''timm''' )
super().__init__(UpperCamelCase_ )
lowercase__ = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
if hasattr(UpperCamelCase_ , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
lowercase__ = getattr(UpperCamelCase_ , '''use_pretrained_backbone''' , UpperCamelCase_ )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
lowercase__ = config.out_indices if getattr(UpperCamelCase_ , '''out_indices''' , UpperCamelCase_ ) is not None else (-1,)
lowercase__ = timm.create_model(
config.backbone , pretrained=UpperCamelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCamelCase_ , **UpperCamelCase_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowercase__ = self._backbone.return_layers
lowercase__ = {layer['''module''']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: List[str] , **UpperCamelCase_: Optional[Any] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
lowercase__ = kwargs.pop('''config''' , TimmBackboneConfig() )
lowercase__ = kwargs.pop('''use_timm_backbone''' , UpperCamelCase_ )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
lowercase__ = kwargs.pop('''num_channels''' , config.num_channels )
lowercase__ = kwargs.pop('''features_only''' , config.features_only )
lowercase__ = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
lowercase__ = kwargs.pop('''out_indices''' , config.out_indices )
lowercase__ = TimmBackboneConfig(
backbone=UpperCamelCase_ , num_channels=UpperCamelCase_ , features_only=UpperCamelCase_ , use_pretrained_backbone=UpperCamelCase_ , out_indices=UpperCamelCase_ , )
return super()._from_config(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: List[Any] ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Tuple=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[int] ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
"""simple docstring"""
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowercase__ = self._all_layers
lowercase__ = self._backbone(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = self._return_layers
lowercase__ = tuple(hidden_states[i] for i in self.out_indices )
else:
lowercase__ = self._backbone(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = None
lowercase__ = tuple(UpperCamelCase_ )
lowercase__ = tuple(UpperCamelCase_ ) if hidden_states is not None else None
if not return_dict:
lowercase__ = (feature_maps,)
if output_hidden_states:
lowercase__ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCamelCase_ , hidden_states=UpperCamelCase_ , attentions=UpperCamelCase_ )
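# NOTE (hypothetical usage sketch, not in the original file; `_a` is the
# obfuscated class name used throughout this dump):
# config = TimmBackboneConfig(backbone='resnet18', use_pretrained_backbone=False, out_indices=(2, 3))
# backbone = _a(config)
# feature_maps = backbone(pixel_values).feature_maps  # one tensor per out_index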
| 43 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase = logging.getLogger()
def _a ( ):
"""simple docstring"""
lowercase__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase__ = parser.parse_args()
return args.f
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , '''r''' ) as f:
lowercase__ = json.load(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'can\'t find {path}' )
return results
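# NOTE (explanatory comment, added): each example script writes an
# all_results.json into its output dir; this helper loads it so the test
# methods below can assert on the reported metrics.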
def _a ( ):
"""simple docstring"""
lowercase__ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( UpperCamelCase__ ):
@classmethod
def lowerCamelCase_ ( cls: int ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = 7 if get_gpu_count() > 1 else 2
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase_ )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
    # The base model scores about 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
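# --- Illustrative sketch, not part of the original test file: each test above builds
# its argv by calling .split() on a formatted multi-line string, which only works
# because no argument value contains whitespace. A minimal standalone demonstration:
testargs_demo = '''
    run_translation_no_trainer.py
    --model_name_or_path t5-small
    --max_train_steps=50
'''.split()
assert testargs_demo == ['run_translation_no_trainer.py', '--model_name_or_path', 't5-small', '--max_train_steps=50']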
| 43 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = sd_pipe.prepare_inputs(UpperCamelCase_ )
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = '''stabilityai/stable-diffusion-2'''
lowercase__ , lowercase__ = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCamelCase_ , subfolder='''scheduler''' )
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
UpperCamelCase_ , scheduler=UpperCamelCase_ , revision='''bf16''' , dtype=jnp.bfloataa , )
lowercase__ = scheduler_params
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = sd_pipe.prepare_inputs(UpperCamelCase_ )
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
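# --- Illustrative sketch (assumes a working jax/flax install; not from the original
# tests). The tests above replicate the pipeline params across devices and shard the
# prompt batch so its leading axis becomes (device_count, per_device_batch, ...):
import jax
import numpy as np
from flax.training.common_utils import shard

n_devices = jax.device_count()
batch = np.zeros((n_devices * 2, 3), dtype=np.float32)
sharded = shard(batch)  # reshape leading dim: (n_devices * 2, 3) -> (n_devices, 2, 3)
assert sharded.shape == (n_devices, 2, 3)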
| 43 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = 'T5Config'
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''mt5'''
_lowercase : str = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
| 43 | 1 |
from collections import deque
from .hash_table import HashTable
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , *UpperCamelCase_: Any , **UpperCamelCase_: Tuple ) -> Any:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: List[str] , UpperCamelCase_: str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(UpperCamelCase_ )
lowercase__ = self.values[key]
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
return (
sum(self.charge_factor - len(UpperCamelCase_ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str]=None ) -> Union[str, Any]:
"""simple docstring"""
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase_ ) == 0
):
return key
return super()._collision_resolution(UpperCamelCase_ , UpperCamelCase_ )
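# --- Illustrative sketch, not part of the original module: each hash slot above holds
# a deque so that colliding values chain at the front. The bucket update, shown standalone:
from collections import deque

slots = {0: None}
bucket = deque([]) if slots[0] is None else slots[0]
bucket.appendleft('first')
slots[0] = bucket
slots[0].appendleft('second')  # a later collision lands at the front of the chain
assert list(slots[0]) == ['second', 'first']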
| 43 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
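# --- Illustrative sketch, not from the original file. Note that the obfuscation gives
# all three helpers above the same name `_a`, so only the last definition survives at
# runtime; the parameter-freezing loop from the first helper is therefore shown inline:
import torch

layer = torch.nn.Linear(4, 4)
for param in layer.parameters():
    param.requires_grad = False  # frozen: excluded from gradient updates
assert all(not p.requires_grad for p in layer.parameters())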
| 43 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[Any] , *UpperCamelCase_: Tuple , **UpperCamelCase_: Any ) -> int:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , '''decord''' )
self.check_model_type(UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {}
if frame_sampling_rate is not None:
lowercase__ = frame_sampling_rate
if num_frames is not None:
lowercase__ = num_frames
lowercase__ = {}
if top_k is not None:
lowercase__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: List[Any] , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: List[Any] ) -> Dict:
"""simple docstring"""
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Union[str, Any]=1 ) -> int:
"""simple docstring"""
if num_frames is None:
lowercase__ = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
lowercase__ = BytesIO(requests.get(UpperCamelCase_ ).content )
lowercase__ = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
lowercase__ = 0
lowercase__ = num_frames * frame_sampling_rate - 1
lowercase__ = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
lowercase__ = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
lowercase__ = list(UpperCamelCase_ )
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: List[Any] ) -> Any:
"""simple docstring"""
lowercase__ = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: List[str]=5 ) -> str:
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase__ = self.model.config.num_labels
if self.framework == "pt":
lowercase__ = model_outputs.logits.softmax(-1 )[0]
lowercase__ , lowercase__ = probs.topk(UpperCamelCase_ )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase__ = scores.tolist()
lowercase__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 43 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['''last_hidden_state'''].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['''logits'''].numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip(
    reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
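# --- Illustrative sketch, not part of the original tests: the tester above derives the
# visible sequence length as ceil((1 - mask_ratio) * (num_patches + 1)), where the +1
# accounts for the [CLS] token. With the defaults used here (image_size=30, patch_size=2,
# mask_ratio=0.6):
import math

num_patches = (30 // 2) ** 2                            # 225 patches
seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1)))
assert (num_patches, seq_length) == (225, 91)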
| 43 | 1 |
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowercase__ = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
lowercase__ = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
lowercase__ = max(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE ) , b_binary.zfill(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
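# --- Illustrative sketch, not part of the original module (the duplicated parameter
# names in the obfuscated signature above would not parse, so the logic is inlined):
# bitwise XOR of 25 (0b11001) and 32 (0b100000), zero-filling the shorter operand.
a_binary, b_binary = bin(25)[2:], bin(32)[2:]
max_len = max(len(a_binary), len(b_binary))
xored = '0b' + ''.join(
    str(int(ca != cb)) for ca, cb in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
assert xored == '0b111001'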
| 43 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if (len(SCREAMING_SNAKE_CASE ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
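# --- Illustrative sketch, not part of the original module (both the encoder and the
# decoder above were renamed to `_a`, so the second definition shadows the first; the
# round trip is shown with the logic inlined instead):
data = b'AB'
encoded = ''.join(hex(byte)[2:].zfill(2).upper() for byte in data)                       # base16 encode
decoded = bytes(int(encoded[i] + encoded[i + 1], 16) for i in range(0, len(encoded), 2))  # base16 decode
assert (encoded, decoded) == ('4142', b'AB')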
| 43 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : str = CTRLTokenizer
_lowercase : Dict = False
_lowercase : int = False
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase__ = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowercase__ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase__ = {'''unk_token''': '''<unk>'''}
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCamelCase_ ( self: Dict , **UpperCamelCase_: str ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = '''adapt react readapt apt'''
lowercase__ = '''adapt react readapt apt'''
return input_text, output_text
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ = '''adapt react readapt apt'''
lowercase__ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__ = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = tokens + [tokenizer.unk_token]
lowercase__ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 43 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = position
lowercase__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowercase__ = []
for position in positions:
lowercase__ , lowercase__ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE )
return permissible_positions
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if is_complete(SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowercase__ , lowercase__ = position
if board[y][x] == 0:
lowercase__ = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ):
return True
lowercase__ = 0
return False
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowercase__ = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
lowercase__ = 0
    lowercase__ = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
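# --- Illustrative sketch, not part of the original module (here too every function is
# named `_a`, so only the last definition is callable): the move generator offsets the
# current square by the eight knight jumps and keeps those still on the board. From the
# corner of a 5x5 board only two moves survive:
y, x, n = 0, 0, 5
offsets = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
valid = [(y + dy, x + dx) for dy, dx in offsets if 0 <= y + dy < n and 0 <= x + dx < n]
assert sorted(valid) == [(1, 2), (2, 1)]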
| 43 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCAmelCase = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if "://" in dataset_path:
lowercase__ = dataset_path.split('''://''' )[1]
return dataset_path
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = not is_remote_filesystem(SCREAMING_SNAKE_CASE )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(SCREAMING_SNAKE_CASE ) , fs._strip_protocol(SCREAMING_SNAKE_CASE ) )
else:
fs.mv(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , recursive=SCREAMING_SNAKE_CASE )
def _a ( ):
"""simple docstring"""
if hasattr(fsspec.asyn , '''reset_lock''' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
lowercase__ = None
lowercase__ = None
lowercase__ = threading.Lock()
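# --- Illustrative sketch, not part of the original module: the path-extraction helper
# above strips the protocol prefix from a remote URI and leaves local paths untouched.
uri = 's3://my-bucket/datasets/train'
path = uri.split('://')[1] if '://' in uri else uri
assert path == 'my-bucket/datasets/train'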
| 43 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowercase__ , lowercase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(UpperCamelCase_ ):
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
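# --- Illustrative sketch, not part of the original pipeline: the guidance step above
# blends the unconditional and conditional predictions as
# uncond + guidance_scale * (cond - uncond), shown here on toy tensors:
import torch

noise_pred_uncond = torch.zeros(1, 4)
noise_pred_cond = torch.ones(1, 4)
guided = noise_pred_uncond + 4.0 * (noise_pred_cond - noise_pred_uncond)
assert torch.equal(guided, torch.full((1, 4), 4.0))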
| 43 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class _a ( UpperCamelCase__ ):
_lowercase : Union[str, Any] = '''luke'''
def __init__( self: Any , UpperCamelCase_: Optional[int]=50_267 , UpperCamelCase_: List[Any]=500_000 , UpperCamelCase_: Optional[int]=768 , UpperCamelCase_: Any=256 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: str=12 , UpperCamelCase_: Optional[int]=3_072 , UpperCamelCase_: str="gelu" , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Tuple=512 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: Any=1E-1_2 , UpperCamelCase_: str=True , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: List[Any]=1 , UpperCamelCase_: int=0 , UpperCamelCase_: Optional[Any]=2 , **UpperCamelCase_: int , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = vocab_size
lowercase__ = entity_vocab_size
lowercase__ = hidden_size
lowercase__ = entity_emb_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = use_entity_aware_attention
lowercase__ = classifier_dropout
| 43 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
}
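# --- Illustrative sketch, not part of the metric class: the compute method above is a
# thin wrapper over nltk's corpus_gleu, which can also be called directly on token lists:
from nltk.translate import gleu_score

hypothesis = 'the cat sat on the mat'.split()
reference = 'the cat is on the mat'.split()
score = gleu_score.corpus_gleu(
    list_of_references=[[reference]], hypotheses=[hypothesis], min_len=1, max_len=4)
assert 0.0 <= score <= 1.0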
| 43 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
lowercase__ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowercase__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowerCamelCase_ ( self: List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=UpperCamelCase_ , )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 43 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = DownBlockaD # noqa F405
_lowercase : Dict = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Tuple = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : str = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405
_lowercase : int = '''down'''
@property
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : str = '''mid'''
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : str = '''mid'''
@property
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405
_lowercase : List[Any] = '''up'''
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = CrossAttnUpBlockaD # noqa F405
_lowercase : List[str] = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
_lowercase : Dict = '''up'''
@property
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnUpBlockaD # noqa F405
_lowercase : Optional[Any] = '''up'''
@property
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = SkipUpBlockaD # noqa F405
_lowercase : Optional[int] = '''up'''
@property
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnSkipUpBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = UpDecoderBlockaD # noqa F405
_lowercase : Tuple = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(UpperCamelCase_ )
def join(separator: str, separated: list) -> str:
    """Joins a list of strings with the given separator and strips a trailing one."""
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
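
# Examples (hedged; argument order assumed to be separator first, as above):
#   join('''-''', ['''a''', '''b''', '''c'''])           -> 'a-b-c'
#   join(''' ''', ['''You''', '''are''', '''amazing!'''])  -> 'You are amazing!'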
if __name__ == "__main__":
from doctest import testmod
testmod()
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = set()
# Replace all the whitespace in our sentence
lowercase__ = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE ) == 26
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = [False] * 26
for char in input_str:
if char.islower():
lowercase__ = True
elif char.isupper():
lowercase__ = True
return all(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
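
# Quick sanity examples (all three implementations agree):
#   is_pangram()                                            -> True
#   is_pangram('''Waltz, bad nymph, for quick jigs vex''')  -> True
#   is_pangram('''hello world''')                           -> False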
def benchmark() -> None:
    """Times the three implementations against each other."""
    from timeit import timeit

    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup ) )
    print(timeit('''is_pangram_faster()''' , setup=setup ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup ) )
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = 256
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = ['''melgan''']
def __init__( self: List[Any] , UpperCamelCase_: SpectrogramNotesEncoder , UpperCamelCase_: SpectrogramContEncoder , UpperCamelCase_: TaFilmDecoder , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
lowercase__ = math.log(1E-5 ) # Matches MelGAN training.
lowercase__ = 4.0 # Largest value for most examples
lowercase__ = 128
self.register_modules(
notes_encoder=UpperCamelCase_ , continuous_encoder=UpperCamelCase_ , decoder=UpperCamelCase_ , scheduler=UpperCamelCase_ , melgan=UpperCamelCase_ , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
        """Linearly rescales spectrogram features from [min_value, max_value] into `output_range`."""
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        """Inverse of `scale_features`: maps model outputs back to [min_value, max_value]."""
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
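
    # Round-trip sketch (hedged): with min_value = log(1e-5) and max_value = 4.0
    # from __init__, `scale_features` maps log-mel values into [-1, 1] for the
    # denoising loop and `scale_to_features` inverts the mapping afterwards:
    #
    #   x = torch.tensor([[math.log(1e-5), 4.0]])
    #   y = pipe.scale_features(x, output_range=[-1.0, 1.0], clip=True)
    #   x_back = pipe.scale_to_features(y, input_range=[-1.0, 1.0])  # ~= x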
    def encode( self , input_tokens , continuous_inputs , continuous_mask ):
        """Runs the note and continuous-context encoders; token id 0 is treated as padding."""
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode( self , encodings_and_masks , input_tokens , noise_time ):
        """One denoising step: broadcasts the timestep and calls the T5-film decoder."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
@torch.no_grad()
def __call__( self: Union[str, Any] , UpperCamelCase_: List[List[int]] , UpperCamelCase_: Optional[torch.Generator] = None , UpperCamelCase_: int = 100 , UpperCamelCase_: bool = True , UpperCamelCase_: str = "numpy" , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(UpperCamelCase_ )}.' )
lowercase__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase__ = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device )
for i, encoder_input_tokens in enumerate(UpperCamelCase_ ):
if i == 0:
lowercase__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase__ = ones
lowercase__ = self.scale_features(
UpperCamelCase_ , output_range=[-1.0, 1.0] , clip=UpperCamelCase_ )
lowercase__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCamelCase_ , continuous_mask=UpperCamelCase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCamelCase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ = self.decode(
encodings_and_masks=UpperCamelCase_ , input_tokens=UpperCamelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase__ = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
lowercase__ = self.scale_to_features(UpperCamelCase_ , input_range=[-1.0, 1.0] )
lowercase__ = mel[:1]
lowercase__ = mel.cpu().float().numpy()
lowercase__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ )
logger.info('''Generated segment''' , UpperCamelCase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
lowercase__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCamelCase_ )
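
# Hedged end-to-end sketch (the checkpoint id is an assumption, not taken from
# this file; the pipeline class is anonymized as `_a` above):
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained('''google/music-spectrogram-diffusion''')
#   output = pipe(processed_midi_tokens, num_inference_steps=100, output_type='''numpy''')
#   audio = output.audios[0]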
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences , padding_value , padding_side , sequence_length ):
    """Pads (or truncates) each sequence to `sequence_length`; a tuple `padding_value` signals 2-D span entries."""
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()


def is_punctuation(char ):
    """Returns True for the ASCII punctuation ranges and any Unicode 'P*' category character."""
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith('''P''' ):
        return True
    return False
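
# Quick examples (right padding; a tuple padding_value yields a trailing span dim):
#   padding_tensor([[1, 2], [3]], -1, '''right''', 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]
#   is_punctuation('''!''') -> True ; is_punctuation('''a''') -> False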
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : PreTrainedTokenizerBase
_lowercase : Union[bool, str, PaddingStrategy] = True
_lowercase : Optional[int] = None
_lowercase : Optional[int] = None
_lowercase : int = -100
_lowercase : str = "pt"
    def torch_call( self , features ):
        """Pads `labels`, `ner_tags` and `original_entity_spans` out to the padded entity-sequence length."""
        import torch

        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids'''] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
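
# Intended use (hedged; the collator class is anonymized as `_a` above): build
# it with a tokenizer and hand it to a DataLoader as `collate_fn`, so that
# LUKE-style NER features with variable-length entity annotations can be
# batched with consistently padded labels and spans.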
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowercase__ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowercase__ = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowercase__ = tf_top_k_top_p_filtering(UpperCamelCase_ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowercase__ = output[output != -float('''inf''' )]
lowercase__ = tf.cast(
tf.where(tf.not_equal(UpperCamelCase_ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-1_2 )
tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
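
    # For reference: `tf_top_k_top_p_filtering` keeps at most `top_k` logits whose
    # cumulative softmax probability stays within `top_p` (never fewer than
    # `min_tokens_to_keep`) and sets everything else to -inf. A minimal hedged call:
    #
    #   filtered = tf_top_k_top_p_filtering(
    #       tf.random.normal((1, 30)), top_k=5, top_p=0.9, min_tokens_to_keep=1
    #   )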
@require_tf
class _a ( unittest.TestCase , UpperCamelCase__ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
_lowercase : List[Any] = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase__ = 2
lowercase__ = 2
class _a ( tf.Module ):
def __init__( self: List[Any] , UpperCamelCase_: List[str] ) -> Optional[int]:
"""simple docstring"""
super(UpperCamelCase_ , self ).__init__()
lowercase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model.generate(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , )
return {"sequences": outputs["sequences"]}
lowercase__ = [[2, 0], [102, 103]]
lowercase__ = [[1, 0], [1, 1]]
lowercase__ = DummyModel(model=UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'''serving_default''': dummy_model.serving} )
lowercase__ = tf.saved_model.load(UpperCamelCase_ ).signatures['''serving_default''']
for batch_size in range(1 , len(UpperCamelCase_ ) + 1 ):
lowercase__ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowercase__ = serving_func(**UpperCamelCase_ )['''sequences''']
lowercase__ = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ )
tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase__ = 1
lowercase__ = 2
class _a ( tf.Module ):
def __init__( self: Tuple , UpperCamelCase_: Dict ) -> Tuple:
"""simple docstring"""
super(UpperCamelCase_ , self ).__init__()
lowercase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Dict , UpperCamelCase_: List[str] ) -> Any:
"""simple docstring"""
lowercase__ = self.model.generate(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , )
return {"sequences": outputs["sequences"]}
lowercase__ = [[2], [102, 103]]
lowercase__ = [[1], [1, 1]]
lowercase__ = DummyModel(model=UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'''serving_default''': dummy_model.serving} )
lowercase__ = tf.saved_model.load(UpperCamelCase_ ).signatures['''serving_default''']
for input_row in range(len(UpperCamelCase_ ) ):
lowercase__ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
lowercase__ = serving_func(**UpperCamelCase_ )['''sequences''']
lowercase__ = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ )
tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
@slow
@require_tensorflow_text
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCamelCase_ )
class _a ( tf.keras.layers.Layer ):
def __init__( self: str ) -> Tuple:
"""simple docstring"""
super().__init__()
lowercase__ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCamelCase_ , '''spiece.model''' ) , '''rb''' ).read() )
lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def lowerCamelCase_ ( self: int , UpperCamelCase_: int , *UpperCamelCase_: List[Any] , **UpperCamelCase_: Dict ) -> Dict:
"""simple docstring"""
lowercase__ = self.tokenizer.tokenize(UpperCamelCase_ )
lowercase__ , lowercase__ = text.pad_model_inputs(
UpperCamelCase_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
lowercase__ = self.model.generate(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
return self.tokenizer.detokenize(UpperCamelCase_ )
lowercase__ = CompleteSentenceTransformer()
lowercase__ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
lowercase__ = complete_model(UpperCamelCase_ )
lowercase__ = tf.keras.Model(UpperCamelCase_ , UpperCamelCase_ )
keras_model.save(UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
lowercase__ = 14
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase__ = '''Hello, my dog is cute and'''
lowercase__ = tokenizer(UpperCamelCase_ , return_tensors='''tf''' )
lowercase__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase__ = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowercase__ = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowercase__ = [638, 198]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowercase__ = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowercase__ = '''Hugging Face is a technology company based in New York and Paris.'''
lowercase__ = bart_tokenizer(UpperCamelCase_ , return_tensors='''tf''' ).input_ids
lowercase__ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowercase__ = bart_model.generate(UpperCamelCase_ ).numpy()
class _a ( UpperCamelCase__ ):
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Optional[int] ) -> List[Any]:
"""simple docstring"""
return super().call(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowercase__ = bart_model.generate(UpperCamelCase_ , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(UpperCamelCase_ , UpperCamelCase_ ) )
class _a ( bart_model.model.encoder.__class__ ):
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Tuple , **UpperCamelCase_: int ) -> List[Any]:
"""simple docstring"""
return super().call(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = FakeEncoder(bart_model.config , bart_model.model.shared )
lowercase__ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowercase__ = bart_model.generate(UpperCamelCase_ ).numpy()
with self.assertRaises(UpperCamelCase_ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCamelCase_ , foo='''bar''' )
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( UpperCamelCase__ ):
def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase__ = gen_kwargs
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase_ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation; we will do it in the loop below.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation; we will do it in the loop below.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
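
# Hedged usage sketch: the subclass above is constructed like a stock
# Seq2SeqTrainer plus `eval_examples` and a `post_process_function` that turns
# raw generations into metric-ready predictions (the dataset/model objects
# below are placeholders):
#
#   trainer = _a(model=model, args=training_args, train_dataset=train_dataset,
#                eval_dataset=eval_dataset, eval_examples=eval_examples,
#                post_process_function=post_processing_function)
#   metrics = trainer.evaluate(max_length=64, num_beams=4)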
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = StableDiffusionInpaintPipeline
_lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowercase : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase : List[Any] = frozenset([] )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
lowercase__ = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
lowercase__ = CLIPTextModel(UpperCamelCase_ )
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=0 ) -> int:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
lowercase__ = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase_ )
else:
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowercase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
lowercase__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = self.get_dummy_inputs(UpperCamelCase_ )
lowercase__ = sd_pipe(**UpperCamelCase_ ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
lowercase__ = '''stabilityai/stable-diffusion-2-inpainting'''
lowercase__ = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
lowercase__ = '''stabilityai/stable-diffusion-2-inpainting'''
lowercase__ = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCamelCase_ ( self: Tuple ) -> Dict:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ = '''stabilityai/stable-diffusion-2-inpainting'''
lowercase__ = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder='''scheduler''' )
lowercase__ = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
lowercase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
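
    # Hedged usage sketch of the pipeline exercised above (mirrors the slow tests):
    #
    #   pipe = StableDiffusionInpaintPipeline.from_pretrained(
    #       '''stabilityai/stable-diffusion-2-inpainting''', torch_dtype=torch.float16
    #   ).to('''cuda''')
    #   image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]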
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args ):
    """Converts a TensorFlow GPTSAN checkpoint directory into a PyTorch state dict saved at `args.output`."""
    parameter_file = os.path.join(args.tf_model_dir , '''parameters.json''' )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
    if not args.output.endswith('''.pt''' ):
        args.output = args.output + '''.pt'''
    new_state = OrderedDict()
    with tf.device('''/CPU:0''' ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float16 )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase__ = 8
lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # feeds into an nn.Sequential with Tanh, so two entries at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/moe''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/mlp''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p1/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/ln''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/att''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/o/kernel''' ):
lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/an''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase__ = '''model.%s.weight''' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
if key_name.startswith('''model/wte''' ):
lowercase__ = '''lm_head.weight'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/wob''' ):
lowercase__ = '''final_logits_bias'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense/kernel":
lowercase__ = '''model.last_project.weight'''
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense_1/bias":
lowercase__ = '''model.last_project.bias'''
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
    torch.save(new_state , args.output )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
lowerCAmelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
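
# Example invocation (the script filename is an assumption; paths are placeholders):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output gptsan.pt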
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
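
# With the lazy structure above, a downstream import such as
#   from transformers import LlamaConfig, LlamaForCausalLM
# only triggers the heavy submodule imports on first attribute access (a sketch
# of the intent; the exact re-export path depends on the top-level package).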
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
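
# The check above returns True only when every element is distinct, e.g.:
#   _a([1, 2, 3])  -> True
#   _a([1, 2, 2])  -> False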
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class _a ( UpperCamelCase__ ):
_lowercase : str = '''open-llama'''
    def __init__( self , vocab_size=100_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key is kept on purpose for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            '''use_memorry_efficient_attention''' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ) -> None:
        """Validate the ``rope_scaling`` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
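# A minimal sketch of what `_rope_scaling_validation` accepts (values are
# illustrative; only the {'type', 'factor'} dict shape comes from the code above):
#   OpenLlamaConfig(rope_scaling={'type': 'linear', 'factor': 2.0})   # passes validation
#   OpenLlamaConfig(rope_scaling={'type': 'cubic', 'factor': 2.0})    # raises ValueError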
| 43 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 | 1 |
def cramers_rule_2x2(equation1, equation2):
    """Solve the system a1*x + b1*y = c1, a2*x + b2*y = c2 with Cramer's rule.

    Each equation is the coefficient triple [a, b, c] of a*x + b*y = c.
    """
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError('''Please enter a valid equation.''' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''' )
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''' )
        raise ValueError('''No solution. (Inconsistent system)''' )
    if determinant_x == determinant_y == 0:
        # Trivial solution: both unknowns are zero (consistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-trivial solution (consistent system)
    return (x, y)
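if __name__ == "__main__":
    # Worked example: x + 2y = 3 and 2x + 5y = 5 has determinant 1, so
    # Cramer's rule gives (x, y) = (5.0, -1.0).
    print(cramers_rule_2x2([1, 2, 3], [2, 5, 5]))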
| 43 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting'''
lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = num_samples * [init_image]
lowercase__ = num_samples * [mask_image]
lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = pipeline(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ )
lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 43 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    return_name = '''generated'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self , return_tensors=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params['''return_type'''] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            generate_kwargs['''eos_token_id'''] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self , input_length: int , min_length: int , max_length: int ):
        """Checks whether there are input/length mismatches; overridden by subclasses."""
        return True
    def _parse_and_tokenize( self , *args , truncation ):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`' )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self , *args , **kwargs ):
        result = super().__call__(*args , **kwargs )
        if (
            isinstance(args[0] , list )
            and all(isinstance(el , str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self , inputs , truncation=TruncationStrategy.DO_NOT_TRUNCATE , **kwargs ):
        inputs = self._parse_and_tokenize(inputs , truncation=truncation , **kwargs )
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        if self.framework == "pt":
            in_b, input_length = model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs['''input_ids'''] ).numpy()
        generate_kwargs['''min_length'''] = generate_kwargs.get('''min_length''' , self.model.config.min_length )
        generate_kwargs['''max_length'''] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        self.check_inputs(input_length , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess( self , model_outputs , return_type=ReturnType.TEXT , clean_up_tokenization_spaces=False ):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        output_ids , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                }
            records.append(record )
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    return_name = '''summary'''
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
    def check_inputs( self , input_length: int , min_length: int , max_length: int ) -> bool:
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be smaller than your max_length={max_length}.' )
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                '''a summarization task, where outputs shorter than the input are typically wanted, you might '''
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    return_name = '''translation'''
    def check_inputs( self , input_length: int , min_length: int , max_length: int ) -> Dict:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
    def _parse_and_tokenize( self , *args , truncation=TruncationStrategy.DO_NOT_TRUNCATE , src_lang=None , tgt_lang=None ):
        if getattr(self.tokenizer , '''_build_translation_inputs''' , None ):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation )
    def _sanitize_parameters( self , src_lang=None , tgt_lang=None , **kwargs ):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params['''src_lang'''] = src_lang
        if tgt_lang is not None:
            preprocess_params['''tgt_lang'''] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('''task''' , self.task )
            items = task.split('''_''' )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params['''src_lang'''] = items[1]
                preprocess_params['''tgt_lang'''] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
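# Example usage (a sketch; assumes these pipelines are registered with
# transformers' `pipeline` factory under the usual task names, as upstream):
#   summarizer = pipeline('summarization')
#   summarizer('...some long article...', min_length=5, max_length=40)
#   translator = pipeline('translation_en_to_fr')
#   translator('How old are you?')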
| 43 |
from __future__ import annotations
import math
def minimax(depth, node_index, is_max, scores, height):
    """Return the optimal score for the player to move at ``node_index``.

    The game tree is a perfect binary tree whose leaves hold ``scores``;
    the maximizing and minimizing players alternate by level.
    """
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main():
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores), 2)
    print('''Optimal value : ''', end='''''')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 43 | 1 |
import functools
def min_distance_up_bottom(worda, wordb):
    """Compute the Levenshtein distance between two words, top-down with memoization.

    >>> min_distance_up_bottom("intention", "execution")
    5
    """
    len_worda = len(worda)
    len_wordb = len(wordb)
    @functools.cache
    def min_distance(indexa, indexb) -> int:
        # if first word index is overflow - insert the rest of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete the rest of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )
    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
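    # Classic demonstration: turning "kitten" into "sitting" takes 3 edits.
    print(min_distance_up_bottom('''kitten''', '''sitting'''))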
| 43 |
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    # In-order traversal: left subtree, node, right subtree -> sorted output.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr):
    # Build a binary search tree from the input, then read it back in order.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 43 | 1 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform ):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter
        def process_string(self, s: str):
            return list(s)
        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
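# Both branches build the same character-level pipeline: collapse repeated spaces,
# strip the ends, then explode each sentence into a list of characters, so that
# jiwer's word-level alignment effectively computes a *character* error rate.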
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcriptions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric ):
    def _info( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute( self , predictions , references , concatenate_texts=False ):
        """Character error rate: (S + D + I) / (S + D + C), accumulated over all pairs."""
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references ):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 43 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a message with the Baconian cipher table above."""
    encoded = ''''''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
    return encoded
def decode(coded: str) -> str:
    """Decode a Baconian-cipher message produced by ``encode``."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
    decoded = ''''''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
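    # Round-trip demonstration of the cipher tables defined above.
    print(encode('''hello world'''))
    print(decode(encode('''hello world''')))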
| 43 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic function 1 / (1 + e^-x) elementwise.
    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))
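def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    """Numerically stabler variant (an added sketch, not part of the original):
    np.exp(-vector) can overflow for large negative inputs, so only values <= 0
    are ever exponentiated and the matching closed form is picked per element."""
    positive = vector >= 0
    exp_neg = np.exp(np.where(positive, -vector, vector))  # argument is always <= 0
    return np.where(positive, 1 / (1 + exp_neg), exp_neg / (1 + exp_neg))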
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
class BartphoTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = monolingual_vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__ = {}
lowercase__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = cnt
cnt += 1
with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowercase__ = line.strip().split()[0]
lowercase__ = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = len(self.fairseq_tokens_to_ids )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(UpperCamelCase_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
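# Example usage (a sketch; network access and the checkpoint listed in
# PRETRAINED_VOCAB_FILES_MAP above are assumed to be available):
#   tokenizer = BartphoTokenizer.from_pretrained('vinai/bartpho-syllable')
#   tokenizer('Chúng tôi là những nghiên cứu viên.')['input_ids']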
| 43 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp( self: List[Any] ) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 43 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the block-level name in ``key`` and shift its block index down by ``offset``."""
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
    return key
def rename_keys(state_dict):
    """Map the original PoolFormer checkpoint keys onto the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''' , '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace , f'patch_embeddings.{total_embed_found}.' )
            key = key.replace('''proj''' , '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc1''' , '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc2''' , '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm1''' , '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm2''' , '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_1''' , '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_2''' , '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''' , '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download an image of two cats for the sanity-check forward pass."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to the HuggingFace format and verify it."""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 10_00
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 10_00)
    # set config attributes
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values
    logger.info(f'Converting model {model_name}...' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
    else:
        raise ValueError(f'Size {size} not supported' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-2 )
    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
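# Example invocation (a sketch; the script filename and local paths are illustrative,
# only the flags come from the parser above):
#   python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar --pytorch_dump_folder_path ./poolformer_s12_hf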
| 43 | 1 |
def hexagonal_numbers(length: int) -> list:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1).

    >>> hexagonal_numbers(5)
    [0, 1, 6, 15, 28]
    """
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 43 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'can\'t find {path}' )
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus ):
    @classmethod
    def setUpClass(cls ):
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
    @classmethod
    def tearDownClass(cls ):
        shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = 7 if get_gpu_count() > 1 else 2
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase_ )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
        # The base model scores 25% accuracy
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
| 43 | 1 |
from random import randint, random
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 5 , ):
"""simple docstring"""
    lowercase__ = [[-1] * number_of_cells] # Create a highway without any cars (-1 marks an empty cell)
lowercase__ = 0
lowercase__ = max(SCREAMING_SNAKE_CASE , 0 )
while i < number_of_cells:
lowercase__ = (
randint(0 , SCREAMING_SNAKE_CASE ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = 0
lowercase__ = highway_now[car_index + 1 :]
for cell in range(len(SCREAMING_SNAKE_CASE ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # We reached the end of the highway without finding a car: wrap around and keep counting from the start
return distance + get_distance(SCREAMING_SNAKE_CASE , -1 )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = len(SCREAMING_SNAKE_CASE )
    # Before the calculations, the next highway state is empty
lowercase__ = [-1] * number_of_cells
for car_index in range(SCREAMING_SNAKE_CASE ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
lowercase__ = min(highway_now[car_index] + 1 , SCREAMING_SNAKE_CASE )
            # Number of empty cells before the next car
lowercase__ = get_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - 1
# We can't have the car causing an accident
lowercase__ = min(next_highway[car_index] , SCREAMING_SNAKE_CASE )
if random() < probability:
# Randomly, a driver will slow down
lowercase__ = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = len(highway[0] )
for i in range(SCREAMING_SNAKE_CASE ):
lowercase__ = update(highway[i] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = [-1] * number_of_cells
for car_index in range(SCREAMING_SNAKE_CASE ):
lowercase__ = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
lowercase__ = (car_index + speed) % number_of_cells
# Commit the change of position
lowercase__ = speed
highway.append(SCREAMING_SNAKE_CASE )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
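    # Illustrative sketch of the per-car update rule (self-contained demo with
    # hypothetical `demo_*` names, since the module's helpers are all mangled
    # to `_a`): accelerate by 1, cap at max_speed, then brake to the gap ahead;
    # the random slowdown step is omitted here.
    demo_highway = [1, -1, -1, 2, -1, -1, -1, 0]  # -1 marks an empty cell
    demo_max_speed = 3
    for demo_idx, demo_speed in enumerate(demo_highway):
        if demo_speed != -1:
            demo_gap = next(
                d for d in range(1, len(demo_highway) + 1)
                if demo_highway[(demo_idx + d) % len(demo_highway)] != -1
            ) - 1
            print(demo_idx, min(demo_speed + 1, demo_max_speed, demo_gap))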
| 43 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = 'T5Config'
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''mt5'''
_lowercase : str = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
| 43 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''trocr'''
_lowercase : int = ['''past_key_values''']
_lowercase : Dict = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self: int , UpperCamelCase_: Tuple=50_265 , UpperCamelCase_: List[str]=1_024 , UpperCamelCase_: Dict=12 , UpperCamelCase_: Optional[Any]=16 , UpperCamelCase_: Tuple=4_096 , UpperCamelCase_: Tuple="gelu" , UpperCamelCase_: Union[str, Any]=512 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Any=0.0 , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=0.0 , UpperCamelCase_: Dict=True , UpperCamelCase_: Dict=False , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: List[str]=1 , UpperCamelCase_: Union[str, Any]=0 , UpperCamelCase_: Tuple=2 , **UpperCamelCase_: str , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = activation_function
lowercase__ = max_position_embeddings
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = init_std
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = scale_embedding
lowercase__ = use_learned_position_embeddings
lowercase__ = layernorm_embedding
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
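        # Note: through `attribute_map` above, `config.hidden_size` should
        # resolve to `config.d_model`, and `num_hidden_layers` /
        # `num_attention_heads` to `decoder_layers` / `decoder_attention_heads`.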
| 43 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
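# Illustrative restatement (hypothetical name `_freeze_demo`; the helpers above
# are all mangled to `_a`, so they cannot be called by name): freezing a module
# is just disabling gradients on each of its parameters.
def _freeze_demo(module: torch.nn.Module) -> None:
    for param in module.parameters():
        param.requires_grad = False  # parameter is now excluded from autograd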
| 43 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ):
"""simple docstring"""
if attention_mask is None:
lowercase__ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowercase__ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowercase__ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _a :
def __init__( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: str=7 , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: str=99 , UpperCamelCase_: Any=16 , UpperCamelCase_: int=2 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=4 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: int=32 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: List[Any]=1 , UpperCamelCase_: Optional[int]=0 , UpperCamelCase_: Optional[int]=0.02 , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = eos_token_id
lowercase__ = pad_token_id
lowercase__ = bos_token_id
lowercase__ = initializer_range
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowercase__ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowercase__ = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowercase__ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , )
lowercase__ = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = 20
lowercase__ = model_class_name(UpperCamelCase_ )
lowercase__ = model.encode(inputs_dict['''input_ids'''] )
lowercase__ , lowercase__ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowercase__ = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowercase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase__ = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowercase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase__ = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
lowercase__ = model.decode(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = 20
lowercase__ = model_class_name(UpperCamelCase_ )
lowercase__ = model.encode(inputs_dict['''input_ids'''] )
lowercase__ , lowercase__ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowercase__ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowercase__ = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase__ = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowercase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase__ = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowercase__ = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
lowercase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
@require_flax
class _a ( unittest.TestCase ):
_lowercase : List[str] = 99
def lowerCamelCase_ ( self: str ) -> List[Any]:
"""simple docstring"""
lowercase__ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowercase__ = input_ids.shape[0]
lowercase__ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCamelCase_ ( self: Dict ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ = self._get_config_and_data()
lowercase__ = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowercase__ = lm_model(input_ids=UpperCamelCase_ )
lowercase__ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowercase__ = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowercase__ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
lowercase__ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
lowercase__ = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ )
lowercase__ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
lowercase__ = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowercase__ = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
lowercase__ = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _a ( UpperCamelCase__ , unittest.TestCase , UpperCamelCase__ ):
_lowercase : Optional[Any] = True
_lowercase : Union[str, Any] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_lowercase : Any = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = FlaxBlenderbotModelTester(self )
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_: Dict , UpperCamelCase_: int=None , **UpperCamelCase_: Dict ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowercase__ = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase__ = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowercase__ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowercase__ = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase__ = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase__ = np.ones((1, 1) ) * model.config.eos_token_id
lowercase__ = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
lowercase__ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowercase__ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ )
lowercase__ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowercase__ = ['''Sam''']
lowercase__ = tokenizer(UpperCamelCase_ , return_tensors='''jax''' )
lowercase__ = model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = '''Sam is a great name. It means "sun" in Gaelic.'''
lowercase__ = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ )
assert generated_txt[0].strip() == tgt_text
| 43 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
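        # e.g. with the defaults above: num_patches = (30 // 2) ** 2 = 225 and
        # seq_length = ceil((1 - 0.6) * (225 + 1)) = 91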
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['''last_hidden_state'''].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['''logits'''].numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
        # prepare a noise vector that will also be used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
| 43 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(SCREAMING_SNAKE_CASE ):
return ext
raise Exception(
f'Unable to determine file format from file extension {path}. '
f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
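# e.g. "predictions.csv" -> "csv"; an empty path falls back to the "pipe"
# format (presumably piped standard input/output).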
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowercase__ = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
lowercase__ = PipelineDataFormat.from_str(
format=SCREAMING_SNAKE_CASE , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: Pipeline , UpperCamelCase_: PipelineDataFormat ) -> Tuple:
"""simple docstring"""
lowercase__ = nlp
lowercase__ = reader
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: ArgumentParser ) -> Dict:
"""simple docstring"""
lowercase__ = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
run_parser.add_argument('''--input''' , type=UpperCamelCase_ , help='''Path to the file to use for inference''' )
run_parser.add_argument('''--output''' , type=UpperCamelCase_ , help='''Path to the file that will be used post to write results.''' )
run_parser.add_argument('''--model''' , type=UpperCamelCase_ , help='''Name or path to the model to instantiate.''' )
run_parser.add_argument('''--config''' , type=UpperCamelCase_ , help='''Name or path to the model\'s config to instantiate.''' )
run_parser.add_argument(
'''--tokenizer''' , type=UpperCamelCase_ , help='''Name of the tokenizer to use. (default: same as the model name)''' )
run_parser.add_argument(
'''--column''' , type=UpperCamelCase_ , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=UpperCamelCase_ , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=UpperCamelCase_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
run_parser.set_defaults(func=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self._nlp, []
for entry in self._reader:
lowercase__ = nlp(**UpperCamelCase_ ) if self._reader.is_multi_columns else nlp(UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
outputs.append(UpperCamelCase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowercase__ = self._reader.save_binary(UpperCamelCase_ )
logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
self._reader.save(UpperCamelCase_ )
| 43 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if (len(SCREAMING_SNAKE_CASE ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
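    # Self-contained cross-check (independent of the mangled helpers above):
    # the stdlib implements the same RFC 3548 uppercase base16 alphabet.
    import base64

    assert base64.b16encode(b"JKL") == b"4A4B4C"
    assert base64.b16decode(b"4A4B4C") == b"JKL"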
| 43 | 1 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCAmelCase = 5_0003
lowerCAmelCase = 5_0002
@require_sentencepiece
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = PLBartTokenizer
_lowercase : Optional[Any] = None
_lowercase : int = False
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PLBartTokenizer(UpperCamelCase_ , language_codes='''base''' , keep_accents=UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self: int ) -> Tuple:
"""simple docstring"""
lowercase__ = PLBartTokenizer(UpperCamelCase_ , language_codes='''base''' , keep_accents=UpperCamelCase_ )
lowercase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase__ = tokenizer.vocab_size
lowercase__ = [tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) for x in range(end - 4 , UpperCamelCase_ )]
self.assertListEqual(UpperCamelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
lowercase__ = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase__ = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) , UpperCamelCase_ , )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = PLBartTokenizer(UpperCamelCase_ , language_codes='''multi''' , keep_accents=UpperCamelCase_ )
lowercase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase__ = tokenizer.vocab_size
lowercase__ = [tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) for x in range(end - 7 , UpperCamelCase_ )]
self.assertListEqual(
UpperCamelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
lowercase__ = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase__ = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) , UpperCamelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
_lowercase : Union[str, Any] = '''uclanlp/plbart-python-en_XX'''
_lowercase : Any = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
_lowercase : Union[str, Any] = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
_lowercase : Tuple = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCamelCase_ ( cls: List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
lowercase__ = 1
return cls
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 )
def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
lowercase__ = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
lowercase__ = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
lowercase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , UpperCamelCase_ )
lowercase__ = 10
lowercase__ = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_ )
lowercase__ = PLBartTokenizer.from_pretrained(UpperCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ )
@require_torch
def lowerCamelCase_ ( self: str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors='''pt''' )
lowercase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , UpperCamelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowercase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowercase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors='''pt''' )
lowercase__ = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors='''pt''' )
lowercase__ = targets['''input_ids''']
lowercase__ = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50_003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50_001,
} , )
| 43 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = position
lowercase__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowercase__ = []
for position in positions:
lowercase__ , lowercase__ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE )
return permissible_positions
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if is_complete(SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowercase__ , lowercase__ = position
if board[y][x] == 0:
lowercase__ = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ):
return True
lowercase__ = 0
return False
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowercase__ = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
lowercase__ = 0
    lowercase__ = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
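    # Illustrative sketch (self-contained, hypothetical `demo_*` names): the
    # eight knight offsets used above, filtered to a 5x5 board from (2, 2).
    demo_moves = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    demo_reachable = [(2 + dy, 2 + dx) for dy, dx in demo_moves if 0 <= 2 + dy < 5 and 0 <= 2 + dx < 5]
    print(len(demo_reachable))  # every move stays on the board from the centre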
| 43 | 1 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = s.rsplit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return new.join(SCREAMING_SNAKE_CASE )
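# e.g. the helper above turns "x.w.y.w" into "x.w.y.weight" when replacing
# ".w" -> ".weight" with count 1: unlike str.replace, only the rightmost
# occurrence changes.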
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowercase__ = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
lowercase__ = key.replace('''res_path.''' , '''res_path.path.''' )
if key.endswith('''.w''' ):
lowercase__ = rreplace(SCREAMING_SNAKE_CASE , '''.w''' , '''.weight''' , 1 )
if key.endswith('''.b''' ):
lowercase__ = rreplace(SCREAMING_SNAKE_CASE , '''.b''' , '''.bias''' , 1 )
lowercase__ = value.float()
return upgrade
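# Worked example of the renames above (comment only):
#   "group_1.res_path.3.w" -> "group_1.group.res_path.path.3.weight"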
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
from dall_e import Encoder
lowercase__ = Encoder()
if os.path.exists(SCREAMING_SNAKE_CASE ):
lowercase__ = torch.load(SCREAMING_SNAKE_CASE )
else:
lowercase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ = ckpt.state_dict()
encoder.load_state_dict(SCREAMING_SNAKE_CASE )
if config_path is not None:
lowercase__ = FlavaImageCodebookConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
lowercase__ = FlavaImageCodebookConfig()
lowercase__ = FlavaImageCodebook(SCREAMING_SNAKE_CASE ).eval()
lowercase__ = encoder.state_dict()
lowercase__ = upgrade_state_dict(SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE )
lowercase__ = hf_model.state_dict()
lowercase__ = count_parameters(SCREAMING_SNAKE_CASE )
lowercase__ = count_parameters(SCREAMING_SNAKE_CASE )
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
else:
return hf_state_dict
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowerCAmelCase = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 43 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowercase__ , lowercase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
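                # split the doubled batch back into unconditional / conditional halves
                # and apply the guidance update: uncond + guidance_scale * (cond - uncond)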
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(UpperCamelCase_ ):
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 43 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
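# Standard lazy-import plumbing: each try/except below registers the torch- or
# TF-specific symbols only when that backend is installed, and at runtime the
# module is replaced by a _LazyModule so the heavy imports are deferred until
# first attribute access.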
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...    \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...    \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...    \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...    \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...    \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...    \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
}
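# Minimal usage, mirroring the docstring examples above:
#
#     google_bleu = datasets.load_metric("google_bleu")
#     results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
#     print(round(results["google_bleu"], 2))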
| 43 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: Union[str, Any] ) -> None:
"""simple docstring"""
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 43 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
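# Each test class below binds one UNet block type to its placement ('down', 'mid'
# or 'up') and checks a fixed 9-value output slice against golden values recorded
# from a seeded reference run (the heavy lifting lives in UNetBlockTesterMixin.test_output).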
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = DownBlockaD # noqa F405
_lowercase : Dict = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Tuple = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : str = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405
_lowercase : int = '''down'''
@property
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : str = '''mid'''
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : str = '''mid'''
@property
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405
_lowercase : List[Any] = '''up'''
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = CrossAttnUpBlockaD # noqa F405
_lowercase : List[str] = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
_lowercase : Dict = '''up'''
@property
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnUpBlockaD # noqa F405
_lowercase : Optional[Any] = '''up'''
@property
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = SkipUpBlockaD # noqa F405
_lowercase : Optional[int] = '''up'''
@property
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnSkipUpBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = UpDecoderBlockaD # noqa F405
_lowercase : Tuple = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(UpperCamelCase_ )
| 43 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
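# enable_full_determinism() pins torch to deterministic kernels so the golden
# output slices asserted below are reproducible across runs.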
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : str = StableDiffusionSAGPipeline
_lowercase : Optional[Any] = TEXT_TO_IMAGE_PARAMS
_lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowercase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowercase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowercase : List[str] = False
def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(UpperCamelCase_ )
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any]=0 ) -> Any:
"""simple docstring"""
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase_ )
else:
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowercase__ = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: int ) -> List[str]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowercase__ = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = '''.'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase_ ( self: List[Any] ) -> Any:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase__ = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = '''.'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase__ = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = '''.'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
lowercase__ = output.images
assert image.shape == (1, 512, 768, 3)
| 43 |
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = set()
# Replace all the whitespace in our sentence
lowercase__ = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE ) == 26
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = [False] * 26
for char in input_str:
if char.islower():
lowercase__ = True
elif char.isupper():
lowercase__ = True
return all(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
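# Three strategies, roughly fastest last (see the benchmark below): accumulating
# seen letters in a set, flipping entries in a 26-slot boolean table, and a single
# set comprehension over the lowercased alphabetic characters.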
def _a ( ):
"""simple docstring"""
from timeit import timeit
lowercase__ = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE ) )
print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE ) )
print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 43 | 1 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
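# Hedged note: the five keys above (quantizer, projection and lm_head tensors)
# live outside the wav2vec2 encoder proper, so they are tracked separately from
# the MAPPING renames applied during weight loading.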
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for attribute in key.split('''.''' ):
lowercase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowercase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowercase__ = None
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
lowercase__ = True
elif name.split('''.''' )[0] == "proj":
lowercase__ = fairseq_model.proj
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
lowercase__ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowercase__ = '''weight_g'''
elif "weight_v" in name:
lowercase__ = '''weight_v'''
elif "bias" in name:
lowercase__ = '''bias'''
elif "weight" in name:
lowercase__ = '''weight'''
else:
lowercase__ = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = full_name.split('''conv_layers.''' )[-1]
lowercase__ = name.split('''.''' )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowercase__ = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
lowercase__ = emb.weight.data
return lin_layer
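# Weight-tying helper: builds a bias-free nn.Linear initialized from the decoder's
# token-embedding matrix, so the output projection shares the embedding weights.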
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase__ = f.readlines()
lowercase__ = [line.split(''' ''' )[0] for line in lines]
lowercase__ = len(SCREAMING_SNAKE_CASE )
lowercase__ = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) )
return vocab_dict
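# fairseq dict files list one '<token> <count>' pair per line; the specials
# <s>/<pad>/</s>/<unk> are pinned to ids 0-3 and the remaining words follow in
# file order starting at id 4.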
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ = SpeechaTextaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE )
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
lowercase__ = model[0].eval()
# set weights for wav2vec2 encoder
lowercase__ = WavaVecaModel(SCREAMING_SNAKE_CASE )
lowercase__ = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )
lowercase__ = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
lowercase__ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
lowercase__ = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
lowercase__ = False
# add projection layer
lowercase__ = nn.Parameter(projection_layer.weight )
lowercase__ = nn.Parameter(projection_layer.bias )
lowercase__ = create_vocab_dict(SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ = hf_wavavec.config.to_dict()
lowercase__ = tokenizer.pad_token_id
lowercase__ = tokenizer.bos_token_id
lowercase__ = tokenizer.eos_token_id
lowercase__ = '''speech_to_text_2'''
lowercase__ = '''wav2vec2'''
lowercase__ = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
lowerCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 43 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length, 2) , SCREAMING_SNAKE_CASE )
else:
lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length) , SCREAMING_SNAKE_CASE )
for i, tensor in enumerate(SCREAMING_SNAKE_CASE ):
if padding_side == "right":
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ = tensor[:sequence_length]
else:
lowercase__ = tensor[:sequence_length]
else:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ = tensor[:sequence_length]
else:
lowercase__ = tensor[:sequence_length]
return out_tensor.tolist()
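# Illustrative behaviour (assuming the masked assignments above copy each row into
# out_tensor): padding_tensor([[1, 2], [3]], -100, 'right', 3) would return
# [[1, 2, -100], [3, -100, -100]], i.e. every row padded out to sequence_length.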
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ord(SCREAMING_SNAKE_CASE )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
lowercase__ = unicodedata.category(SCREAMING_SNAKE_CASE )
if cat.startswith('''P''' ):
return True
return False
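# Treats the ASCII ranges !-/ (33-47), :-@ (58-64), [-` (91-96) and {-~ (123-126),
# plus any character whose Unicode category starts with 'P', as punctuation.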
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : PreTrainedTokenizerBase
_lowercase : Union[bool, str, PaddingStrategy] = True
_lowercase : Optional[int] = None
_lowercase : Optional[int] = None
_lowercase : int = -100
_lowercase : str = "pt"
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ) -> List[Any]:
"""simple docstring"""
import torch
lowercase__ = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase__ = self.tokenizer.pad(
UpperCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
lowercase__ = torch.tensor(batch['''entity_ids'''] ).shape[1]
lowercase__ = self.tokenizer.padding_side
if padding_side == "right":
lowercase__ = [
list(UpperCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) for label in labels
]
else:
lowercase__ = [
[self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) + list(UpperCamelCase_ ) for label in labels
]
lowercase__ = [feature['''ner_tags'''] for feature in features]
lowercase__ = padding_tensor(UpperCamelCase_ , -1 , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = [feature['''original_entity_spans'''] for feature in features]
lowercase__ = padding_tensor(UpperCamelCase_ , (-1, -1) , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = {k: torch.tensor(UpperCamelCase_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 43 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
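# The tester below wires up a deliberately tiny ViTMAE config (30px images, 2x2
# patches, hidden size 32, 2 layers) so the TF model checks run quickly on CPU.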
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['''last_hidden_state'''].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['''logits'''].numpy()
lowercase__ = 0
                lowercase__ = np.amax(np.abs(out_a - out_b ) )  # max absolute difference between the pre-save and post-save outputs
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
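# Illustrative stand-alone sketch (not part of the test file above): ViTMAE's
# encoder only sees the unmasked patches plus the [CLS] token, so the tester's
# expected sequence length is (num_patches + 1) * (1 - mask_ratio), rounded up.
# The concrete sizes below are assumptions chosen for demonstration.
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                      # 225 patches
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # keep 40% of tokens
print(num_patches, seq_length)                                     # 225 91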
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( UpperCamelCase__ ):
def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase__ = gen_kwargs
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase_ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = self.get_test_dataloader(UpperCamelCase_ )
        # Temporarily disable metric computation; we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
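# Illustrative stand-alone sketch of the metric key-prefixing loop used in the
# trainer above: every computed metric is namespaced with metric_key_prefix
# (e.g. "eval") before being merged with the loop's own metrics. The metric
# names and values here are made up for demonstration.
metric_key_prefix = "eval"
metrics = {"rouge1": 41.2, "eval_loss": 1.37}
for key in list(metrics.keys()):
    if not key.startswith(f"{metric_key_prefix}_"):
        metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
print(metrics)  # {'eval_loss': 1.37, 'eval_rouge1': 41.2}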
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowerCAmelCase = None
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
lowerCAmelCase = {
'camembert-base': 512,
}
lowerCAmelCase = '▁'
class _a ( UpperCamelCase__ ):
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
_lowercase : Tuple = CamembertTokenizer
def __init__( self: List[Any] , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Dict=None , UpperCamelCase_: str="<s>" , UpperCamelCase_: Any="</s>" , UpperCamelCase_: Dict="</s>" , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Tuple="<pad>" , UpperCamelCase_: Optional[Any]="<mask>" , UpperCamelCase_: Tuple=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCamelCase_: Union[str, Any] , ) -> List[Any]:
"""simple docstring"""
lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
    def lowerCamelCase_ ( self: int , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        lowercase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
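# Illustrative sketch of the special-token layout the tokenizer above builds
# (the token ids are assumptions, not CamemBERT's real vocabulary): a single
# sequence becomes <s> A </s>, a pair becomes <s> A </s></s> B </s>, and the
# token type ids are all zeros for both layouts.
cls_id, sep_id = 5, 6                      # assumed ids
seq_a, seq_b = [10, 11], [12]
single = [cls_id] + seq_a + [sep_id]
pair = [cls_id] + seq_a + [sep_id, sep_id] + seq_b + [sep_id]
print(single)                              # [5, 10, 11, 6]
print(pair)                                # [5, 10, 11, 6, 6, 12, 6]
print([0] * len(pair))                     # token type ids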
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
"""simple docstring"""
lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' )
lowercase__ = json.loads(open(SCREAMING_SNAKE_CASE ).read() )
if not params:
raise ValueError(
f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith('''.pt''' ):
lowercase__ = args.output + '''.pt'''
lowercase__ = OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase__ = tf.train.load_checkpoint(args.tf_model_dir )
lowercase__ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
            lowercase__ = reader.get_tensor(key_name ).astype(np.float32 )  # float32 is an assumed dtype; the original precision was garbled
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase__ = 8
                lowercase__ = '''model.sqout.%d.weight''' % (player * 2)  # feeds an nn.Sequential with Tanh, so 2 at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/moe''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/mlp''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p1/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/ln''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/att''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/o/kernel''' ):
lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/an''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase__ = '''model.%s.weight''' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
if key_name.startswith('''model/wte''' ):
lowercase__ = '''lm_head.weight'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/wob''' ):
lowercase__ = '''final_logits_bias'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense/kernel":
lowercase__ = '''model.last_project.weight'''
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense_1/bias":
lowercase__ = '''model.last_project.bias'''
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
torch.save(SCREAMING_SNAKE_CASE , args.output )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
lowerCAmelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
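# Illustrative sketch of the kernel-layout conversion the script above applies
# repeatedly: TF Dense kernels are stored (in_features, out_features), while
# torch.nn.Linear weights are (out_features, in_features), hence the
# transpose([1, 0]).copy() before wrapping in a tensor. Shapes are assumed.
import numpy as np
import torch

tf_kernel = np.random.randn(8, 4).astype(np.float32)         # (in, out), assumed
pt_weight = torch.tensor(tf_kernel.transpose([1, 0]).copy())  # (out, in)
assert pt_weight.shape == (4, 8)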
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _a ( unittest.TestCase ):
def __init__( self: List[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Dict=3 , UpperCamelCase_: Any=10 , UpperCamelCase_: Optional[int]=18 , UpperCamelCase_: List[Any]=30 , UpperCamelCase_: Tuple=400 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=None , UpperCamelCase_: Tuple=True , UpperCamelCase_: str=[0.5, 0.5, 0.5] , UpperCamelCase_: List[Any]=[0.5, 0.5, 0.5] , UpperCamelCase_: Dict=None , ) -> Any:
"""simple docstring"""
lowercase__ = size if size is not None else {'''shortest_edge''': 18}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = num_frames
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
lowercase__ = crop_size
def lowerCamelCase_ ( self: int ) -> List[str]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = VivitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = VivitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
lowercase__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for video in video_inputs:
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
lowercase__ = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for video in video_inputs:
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
lowercase__ = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase_ ( self: List[str] ) -> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for video in video_inputs:
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
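# Illustrative sketch of the output layout the assertions above check: video
# processors in this style return pixel_values of shape
# (batch, num_frames, num_channels, crop_height, crop_width). Sizes assumed.
import numpy as np

batch, frames, channels, crop = 7, 10, 3, 18
pixel_values = np.zeros((batch, frames, channels, crop, crop), dtype=np.float32)
print(pixel_values.shape)  # (7, 10, 3, 18, 18)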
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
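# Illustrative usage of the uniqueness check above: comparing len(set(x)) with
# len(x) detects duplicates in O(n) expected time for hashable items.
print(len(set([1, 2, 3])) == len([1, 2, 3]))  # True  (all unique)
print(len(set([1, 2, 2])) == len([1, 2, 2]))  # False (duplicate 2)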
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['MobileViTFeatureExtractor']
lowerCAmelCase = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
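# Illustrative sketch of the optional-dependency guard used in the init above:
# probe for a backend and only register the heavy symbols when it is present.
# The structure keys and symbol names below are illustrative, not MobileViT's.
import importlib.util

_import_structure = {"configuration": ["Config"]}
if importlib.util.find_spec("torch") is not None:
    _import_structure["modeling"] = ["Model"]
print(_import_structure)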
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
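# Minimal sketch of the runtime half of the lazy-module pattern above: the
# package module is swapped in sys.modules for a proxy that imports a
# submodule only on first attribute access. This is an illustration, not the
# _LazyModule implementation from transformers; all names are assumed.
import importlib
import types

class LazyProxy(types.ModuleType):
    def __init__(self, name: str, submodules: set):
        super().__init__(name)
        self._submodules = submodules

    def __getattr__(self, item: str):
        if item in self._submodules:
            return importlib.import_module(f"{self.__name__}.{item}")
        raise AttributeError(item)

# e.g. sys.modules[__name__] = LazyProxy(__name__, {"modeling_convbert"})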
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class _a ( datasets.BuilderConfig ):
_lowercase : Optional[datasets.Features] = None
class _a ( datasets.ArrowBasedBuilder ):
_lowercase : Optional[int] = PandasConfig
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[Any] ) -> Optional[int]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowercase__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase_ , (str, list, tuple) ):
lowercase__ = data_files
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase__ = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowercase__ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase__ = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCamelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase__ = table_cast(UpperCamelCase_ , self.config.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: List[str] ) -> Union[str, Any]:
"""simple docstring"""
for i, file in enumerate(itertools.chain.from_iterable(UpperCamelCase_ ) ):
with open(UpperCamelCase_ , '''rb''' ) as f:
lowercase__ = pa.Table.from_pandas(pd.read_pickle(UpperCamelCase_ ) )
yield i, self._cast_table(UpperCamelCase_ )
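# Illustrative sketch of the conversion step in the builder above: a pandas
# DataFrame (built inline here instead of read from a pickle) is handed to
# pyarrow, which is the table format the datasets library yields.
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
table = pa.Table.from_pandas(df)
print(table.num_rows, table.column_names)  # 2 ['a', 'b']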
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting'''
lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = num_samples * [init_image]
lowercase__ = num_samples * [mask_image]
lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = pipeline(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ )
lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
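# Illustrative sketch of what sharding does in the test above: an input of
# shape (num_devices * n, ...) is reshaped to (num_devices, n, ...) so that a
# pmapped pipeline runs one slice per device. The sizes below are assumed.
import numpy as np

num_devices, per_device = 2, 4
batch = np.zeros((num_devices * per_device, 77), dtype=np.int32)
sharded = batch.reshape(num_devices, per_device, *batch.shape[1:])
print(sharded.shape)  # (2, 4, 77)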
from __future__ import annotations
from collections.abc import Generator
def sieve( ):
    """simple docstring"""
    factor_map = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution( limit = 1E10 ):
    """simple docstring"""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution())
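# Quick check of the incremental sieve defined above: factor_map maps each
# upcoming composite to one of its prime factors, so every integer is
# classified with a single dict lookup and primes stream out lazily.
from itertools import islice

print(list(islice(sieve(), 8)))  # [2, 3, 5, 7, 11, 13, 17, 19]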
from __future__ import annotations
import math
def minimax( depth , node_index , is_max , scores , height ):
    """simple docstring"""
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
def main( ):
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print('''Optimal value : ''' , end='''''' )
    print(minimax(0 , 0 , True , scores , height ) )  # maximizing player moves first
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
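# Worked example of the minimax above (uses the function defined in this
# file): on a depth-1 tree with leaf scores [3, 5] the maximizing root picks
# max(3, 5) = 5, while a minimizing root picks min(3, 5) = 3.
print(minimax(0, 0, True, [3, 5], 1))   # 5
print(minimax(0, 0, False, [3, 5], 1))  # 3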
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=13 , UpperCamelCase_: List[Any]=7 , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: str=False , UpperCamelCase_: Any=False , UpperCamelCase_: int=2 , UpperCamelCase_: Dict=99 , UpperCamelCase_: Tuple=0 , UpperCamelCase_: Tuple=32 , UpperCamelCase_: List[str]=5 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=512 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: Any=0.02 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: Dict="last" , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Union[str, Any]=None , ) -> List[str]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_lengths
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = vocab_size
lowercase__ = n_special
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = summary_type
lowercase__ = use_proj
lowercase__ = scope
def lowerCamelCase_ ( self: Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_input_lengths:
lowercase__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , lengths=UpperCamelCase_ , langs=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , langs=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any , ) -> str:
"""simple docstring"""
lowercase__ = FlaubertWithLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any] , ) -> Dict:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnsweringSimple(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: int , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnswering(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
lowercase__ = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , p_mask=UpperCamelCase_ , )
lowercase__ = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , )
((lowercase__) , ) = result_with_labels.to_tuple()
lowercase__ = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
((lowercase__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: int , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] , ) -> int:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: int , ) -> List[str]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = FlaubertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self: List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
        (
            (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) ,
        ) = config_and_inputs
lowercase__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowercase : str = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Any=False ) -> Tuple:
"""simple docstring"""
lowercase__ = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
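                # The QA head is trained with start/end position labels; supply zero tensors of the right shape.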
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
lowercase__ = FlaubertModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , emb_dim=37 )
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: int ) -> List[str]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = FlaubertModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
@require_torch_gpu
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = torch.jit.trace(
UpperCamelCase_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCamelCase_ , os.path.join(UpperCamelCase_ , '''traced_model.pt''' ) )
lowercase__ = torch.jit.load(os.path.join(UpperCamelCase_ , '''traced_model.pt''' ) , map_location=UpperCamelCase_ )
loaded(inputs_dict['''input_ids'''].to(UpperCamelCase_ ) , inputs_dict['''attention_mask'''].to(UpperCamelCase_ ) )
@require_torch
class _a ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self: Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowercase__ = model(UpperCamelCase_ )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCamelCase_ )
lowercase__ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 43 |
class _a :
def __init__( self: Tuple , UpperCamelCase_: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = val
lowercase__ = None
lowercase__ = None
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
if self.val:
if val < self.val:
if self.left is None:
lowercase__ = Node(UpperCamelCase_ )
else:
self.left.insert(UpperCamelCase_ )
elif val > self.val:
if self.right is None:
lowercase__ = Node(UpperCamelCase_ )
else:
self.right.insert(UpperCamelCase_ )
else:
lowercase__ = val
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if root:
inorder(root.left , SCREAMING_SNAKE_CASE )
res.append(root.val )
inorder(root.right , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) == 0:
return arr
lowercase__ = Node(arr[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowercase__ = []
inorder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 43 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _a ( unittest.TestCase ):
def __init__( self: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple=7 , UpperCamelCase_: int=3 , UpperCamelCase_: int=18 , UpperCamelCase_: Optional[Any]=30 , UpperCamelCase_: str=400 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: List[str]=None , ) -> Tuple:
"""simple docstring"""
lowercase__ = size if size is not None else {'''height''': 20, '''width''': 20}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = size
lowercase__ = do_normalize
lowercase__ = do_convert_rgb
lowercase__ = [512, 1_024, 2_048, 4_096]
lowercase__ = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
lowercase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = PixaStructImageProcessingTester(self )
@property
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_convert_rgb''' ) )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.image_processor_tester.prepare_dummy_image()
lowercase__ = self.image_processing_class(**self.image_processor_dict )
lowercase__ = 2_048
lowercase__ = image_processor(UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
lowercase__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCamelCase_ ):
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
lowercase__ = '''Hello'''
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCamelCase_ ( self: List[Any] ) -> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCamelCase_ ( self: Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = PixaStructImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = PixaStructImageProcessingTester(self , num_channels=4 )
lowercase__ = 3
@property
def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_convert_rgb''' ) )
def lowerCamelCase_ ( self: int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 43 |
lowerCAmelCase = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
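# Invert the table so every 5-character code maps back to its plaintext character.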
lowerCAmelCase = {value: key for key, value in encode_dict.items()}
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
lowercase__ = ''''''
for word in coded.split():
while len(SCREAMING_SNAKE_CASE ) != 0:
decoded += decode_dict[word[:5]]
lowercase__ = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 43 | 1 |
from collections import defaultdict
class _a :
def __init__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
lowercase__ = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
lowercase__ = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
lowercase__ = (1 << len(UpperCamelCase_ )) - 1
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] ) -> List[str]:
"""simple docstring"""
if mask == self.final_mask:
return 1
        # if tasks have run out before every person was assigned one, there are 0 ways
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
lowercase__ = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
        # now try assigning this task to every eligible person and recursively
        # count the ways for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
lowercase__ = total_ways_util
return self.dp[mask][task_no]
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
lowerCAmelCase = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
lowerCAmelCase = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 43 |
import numpy as np
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if num <= 0:
raise ValueError('''math domain error''' )
return quad(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE , args=(SCREAMING_SNAKE_CASE) )[0]
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return math.pow(SCREAMING_SNAKE_CASE , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 43 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = '▁'
lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
lowerCAmelCase = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
lowerCAmelCase = {'vinai/bartpho-syllable': 1024}
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = monolingual_vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__ = {}
lowercase__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = cnt
cnt += 1
with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowercase__ = line.strip().split()[0]
lowercase__ = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = len(self.fairseq_tokens_to_ids )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(UpperCamelCase_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
| 43 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = original_name.split('''.''' )[0]
lowercase__ = key.split('''.''' )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] )
lowercase__ = orig_block_num - offset
lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = OrderedDict()
lowercase__ , lowercase__ = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
lowercase__ = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
lowercase__ = key[: key.find('''proj''' )]
lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' )
lowercase__ = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
lowercase__ = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' )
if "norm2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
lowercase__ = key.replace('''head''' , '''classifier''' )
lowercase__ = value
return new_state_dict
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = PoolFormerConfig()
# set attributes based on model_name
lowercase__ = '''huggingface/label-files'''
lowercase__ = model_name[-3:]
lowercase__ = 10_00
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = (1, 10_00)
# set config attributes
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
if size == "s12":
lowercase__ = [2, 2, 6, 2]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s24":
lowercase__ = [4, 4, 12, 4]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.9
elif size == "m36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
elif size == "m48":
lowercase__ = [8, 8, 24, 8]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
else:
raise ValueError(f'Size {size} not supported' )
# load image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
# Prepare image
lowercase__ = prepare_img()
lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) )
# rename keys
lowercase__ = rename_keys(SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
lowercase__ = model(SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 43 | 1 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) == 0:
return array
lowercase__ , lowercase__ = min(SCREAMING_SNAKE_CASE ), max(SCREAMING_SNAKE_CASE )
    # Compute the number of holes needed to span the full range of values.
lowercase__ = _max - _min + 1
lowercase__ , lowercase__ = [0] * holes_range, [0] * holes_range
    # Fill the holes: store each value and count its occurrences.
for i in array:
lowercase__ = i - _min
lowercase__ = i
holes_repeat[index] += 1
    # Rebuild the array by reading the holes back in ascending order.
lowercase__ = 0
for i in range(SCREAMING_SNAKE_CASE ):
while holes_repeat[i] > 0:
lowercase__ = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = input('Enter numbers separated by comma:\n')
lowerCAmelCase = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 43 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase = logging.getLogger()
def _a ( ):
"""simple docstring"""
lowercase__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase__ = parser.parse_args()
return args.f
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , '''r''' ) as f:
lowercase__ = json.load(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'can\'t find {path}' )
return results
def _a ( ):
"""simple docstring"""
lowercase__ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( UpperCamelCase__ ):
@classmethod
def lowerCamelCase_ ( cls: int ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = 7 if get_gpu_count() > 1 else 2
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase_ )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
        # The base model scores about 25%; after fine-tuning, accuracy should be at least 60%.
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
| 43 | 1 |
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [[float('''inf''' ) for _ in range(SCREAMING_SNAKE_CASE )] for _ in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowercase__ = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(SCREAMING_SNAKE_CASE ):
# looping through rows of graph array
for i in range(SCREAMING_SNAKE_CASE ):
# looping through columns of graph array
for j in range(SCREAMING_SNAKE_CASE ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowercase__ = dist[i][k] + dist[k][j]
_print_dist(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return dist, v
if __name__ == "__main__":
lowerCAmelCase = int(input('Enter number of vertices: '))
lowerCAmelCase = int(input('Enter number of edges: '))
lowerCAmelCase = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase = 0.0
    # src and dst are vertex indices and must lie within the v x v graph matrix;
    # out-of-range values will raise an IndexError
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase = int(input('Enter source:'))
lowerCAmelCase = int(input('Enter destination:'))
lowerCAmelCase = float(input('Enter weight:'))
lowerCAmelCase = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
    # # Expected output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 43 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = 'T5Config'
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''mt5'''
_lowercase : str = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
| 43 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: int=13 , UpperCamelCase_: str=30 , UpperCamelCase_: str=2 , UpperCamelCase_: int=3 , UpperCamelCase_: Any=True , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Optional[Any]=32 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Any=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: str=10 , UpperCamelCase_: List[Any]=0.02 , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: str=None , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
lowercase__ = self.image_size // 2
lowercase__ = pixel_values[:, :, :image_size, :image_size]
lowercase__ = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = TFViTForImageClassification(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
lowercase__ = self.image_size // 2
lowercase__ = pixel_values[:, :, :image_size, :image_size]
lowercase__ = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTForImageClassification(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : str = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_lowercase : List[Any] = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_lowercase : List[str] = False
_lowercase : int = False
_lowercase : str = False
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def lowerCamelCase_ ( self: List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: int ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# forward pass
lowercase__ = model(**UpperCamelCase_ )
# verify the logits
lowercase__ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
| 43 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
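# Usage sketch (editor's addition; the helpers above are, in order, a parameter
# freezer, a device picker, an image display helper and a timestamp formatter --
# this dump renames them all to `_a`, so the descriptive names below are assumptions):
#
#     device = get_device()               # "cuda", "mps" or "cpu"
#     freeze_params(model.features)       # disable gradients on a frozen backbone
#     print(f"run started at {get_timestamp()}")   # e.g. "run started at 14:05:09"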
| 43 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''time_series_transformer'''
_lowercase : List[str] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: str , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "student_t" , UpperCamelCase_: str = "nll" , UpperCamelCase_: int = 1 , UpperCamelCase_: List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase_: Optional[Union[str, bool]] = "mean" , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: bool = True , UpperCamelCase_: str = "gelu" , UpperCamelCase_: int = 64 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: int = 100 , UpperCamelCase_: float = 0.02 , UpperCamelCase_: List[Any]=True , **UpperCamelCase_: List[str] , ) -> str:
"""simple docstring"""
lowercase__ = prediction_length
lowercase__ = context_length or prediction_length
lowercase__ = distribution_output
lowercase__ = loss
lowercase__ = input_size
lowercase__ = num_time_features
lowercase__ = lags_sequence
lowercase__ = scaling
lowercase__ = num_dynamic_real_features
lowercase__ = num_static_real_features
lowercase__ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
lowercase__ = cardinality
else:
lowercase__ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
lowercase__ = embedding_dimension
else:
lowercase__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ = num_parallel_samples
# Transformer architecture configuration
lowercase__ = input_size * len(UpperCamelCase_ ) + self._number_of_features
lowercase__ = d_model
lowercase__ = encoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = encoder_ffn_dim
lowercase__ = decoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = decoder_layers
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = use_cache
super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: Optional[int] ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
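# Worked example of the feature arithmetic above (editor's note): with the defaults
# input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], no dynamic/static/time features
# and cardinality=[0], _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2, so
# feature_size = input_size * len(lags_sequence) + _number_of_features = 7 + 2 = 9.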
| 43 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
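# Worked example of the two lines above (editor's note): with image_size=30,
# patch_size=2 and mask_ratio=0.6, num_patches = (30 // 2) ** 2 = 225 and
# seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 unmasked tokens.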
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['''last_hidden_state'''].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['''logits'''].numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
| 43 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowercase__ = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ = torch.permute(SCREAMING_SNAKE_CASE , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE ):
# linear layer
lowercase__ = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowercase__ = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
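# Example of the renaming above (editor's note): a 2-D linear kernel such as
# ('encoder', 'block_0', 'mlp', 'kernel') becomes ('encoder', 'block_0', 'mlp',
# 'weight') with the tensor transposed, while a 3-D expert kernel keeps the same
# renaming but is permuted with torch.permute(tensor, (0, 2, 1)) instead.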
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if "metadata" in layer:
lowercase__ = layer.split('''metadata''' )
lowercase__ = ''''''.join(split_layer[0] )[:-1]
lowercase__ = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
lowercase__ = layer.split('''kvstore''' )
lowercase__ = ''''''.join(split_layer[0] )[:-1]
lowercase__ = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
lowercase__ = layer.split('''/''' )
lowercase__ = '''/'''.join(split_layer[:-1] )
lowercase__ = (split_layer[-1],)
if "kvstore/path" in layer:
lowercase__ = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
lowercase__ = '''file'''
else:
lowercase__ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
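# Example of the key splitting above (editor's note, hypothetical layer name): for
# layer = 'target/encoder/relpos_bias/kvstore/path', the function returns the real
# layer name 'target/encoder/relpos_bias', split_layer = [('kvstore', 'path')], and
# the content rewritten to f'{switch_checkpoint_path}/{checkpoint_info[layer]}'.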
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = rename_keys(SCREAMING_SNAKE_CASE )
lowercase__ = {}
for k, v in current_block.items():
lowercase__ = v
lowercase__ = new_current_block
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = WEIGHTS_NAME ):
"""simple docstring"""
lowercase__ = convert_file_size_to_int(SCREAMING_SNAKE_CASE )
lowercase__ = []
lowercase__ = {}
lowercase__ = 0
lowercase__ = 0
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
lowercase__ = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
lowercase__ = flatten_dict(SCREAMING_SNAKE_CASE , sep='''/''' )
lowercase__ = {}
for layer in checkpoint_info.keys():
lowercase__ , lowercase__ , lowercase__ = get_key_and_tensorstore_dict(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if curr_real_layer_name in all_layers:
lowercase__ = content
else:
lowercase__ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowercase__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowercase__ , lowercase__ = rename_base_flax_keys(tuple(key.split('''/''' ) ) , SCREAMING_SNAKE_CASE )
lowercase__ = '''/'''.join(SCREAMING_SNAKE_CASE )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowercase__ = os.path.join(
SCREAMING_SNAKE_CASE , weights_name.replace('''.bin''' , f'-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowercase__ = {}
lowercase__ = 0
lowercase__ = raw_weights.to(getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('''.bin''' , f'-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(SCREAMING_SNAKE_CASE ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowercase__ = {}
lowercase__ = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ = weights_name.replace(
'''.bin''' , f'-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE ):05d}.bin' )
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ = shard
for key in shard:
lowercase__ = shard_file
# Add the metadata
lowercase__ = {'''total_size''': total_size}
lowercase__ = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' , encoding='''utf-8''' ) as f:
lowercase__ = json.dumps(SCREAMING_SNAKE_CASE , indent=2 , sort_keys=SCREAMING_SNAKE_CASE ) + '''\n'''
f.write(SCREAMING_SNAKE_CASE )
return metadata, index
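# Example of the shard naming above (editor's note): with two shards and
# weights_name='pytorch_model.bin', shard 0 is renamed to
# 'pytorch_model-00001-of-00002.bin', the weight_map sends every key in that shard
# to this file name, and the index carries a {'total_size': ...} metadata entry.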
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
lowerCAmelCase = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _a ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowercase__ = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
lowercase__ = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
lowercase__ = TaTokenizer.from_pretrained('''t5-small''' )
lowercase__ = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
lowercase__ = tokenizer(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids
lowercase__ = model.generate(SCREAMING_SNAKE_CASE , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 43 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if (len(SCREAMING_SNAKE_CASE ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) )
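# Cross-check against the standard library (editor's addition; base64.b16encode
# uses the same uppercase RFC 3548 alphabet as the two functions above):
import base64

assert base64.b16encode(b"Hello") == b"48656C6C6F"
assert base64.b16decode(b"48656C6C6F") == b"Hello"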
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
from collections.abc import Sequence
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = 0.0
for coeff in reversed(SCREAMING_SNAKE_CASE ):
lowercase__ = result * x + coeff
return result
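# Worked check (editor's note): for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0,
# both evaluators return 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500.0 + 9300.0
# + 70000.0 = 79800.0; horner needs only one multiply and one add per coefficient,
# while evaluate_poly computes each power of x from scratch.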
if __name__ == "__main__":
lowerCAmelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCAmelCase = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 43 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = position
lowercase__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowercase__ = []
for position in positions:
lowercase__ , lowercase__ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE )
return permissible_positions
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if is_complete(SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowercase__ , lowercase__ = position
if board[y][x] == 0:
lowercase__ = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ):
return True
lowercase__ = 0
return False
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowercase__ = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
lowercase__ = 0
lowercase__ = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(SCREAMING_SNAKE_CASE )
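# Usage note (editor's sketch for open_knight_tour above): n=1 returns [[1]]
# immediately, n=2 raises ValueError because a knight cannot move on a 2x2 board,
# and n=5 is the smallest square board with an open tour, returning a 5x5 grid
# numbered 1..25 in visit order.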
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _a ( UpperCamelCase__ ):
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: float ) -> float:
"""simple docstring"""
return 0.0
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
lowercase__ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = 5_12
lowercase__ = [1] + [0] * (size - 1)
lowercase__ = [filter_type.process(SCREAMING_SNAKE_CASE ) for item in inputs]
lowercase__ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase__ = np.abs(np.fft.fft(SCREAMING_SNAKE_CASE ) )
lowercase__ = 20 * np.log10(SCREAMING_SNAKE_CASE )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
# Display within reasonable bounds
lowercase__ = get_bounds(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('''Gain (dB)''' )
plt.plot(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = 5_12
lowercase__ = [1] + [0] * (size - 1)
lowercase__ = [filter_type.process(SCREAMING_SNAKE_CASE ) for item in inputs]
lowercase__ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase__ = np.angle(np.fft.fft(SCREAMING_SNAKE_CASE ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('''Phase shift (Radians)''' )
plt.plot(np.unwrap(SCREAMING_SNAKE_CASE , -2 * pi ) )
plt.show()
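# Minimal filter satisfying the process() protocol above (editor's sketch with a
# hypothetical class name): an all-pass identity filter, for which the impulse
# response is the impulse itself, so the plotted gain is 0 dB and the phase shift
# is 0 at every frequency.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample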
| 43 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowercase__ , lowercase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(UpperCamelCase_ ):
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 43 | 1 |
import collections
import os
import re
from pathlib import Path
lowerCAmelCase = 'src/transformers'
# Matches is_xxx_available()
lowerCAmelCase = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
lowerCAmelCase = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
lowerCAmelCase = re.compile(R'^\s*try:')
# Catches a line with else:
lowerCAmelCase = re.compile(R'^\s*else:')
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
return None
lowercase__ = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE )
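# Example of find_backend above (editor's note):
# find_backend('    if not is_torch_available():') returns 'torch', and
# find_backend('    if not is_torch_available() and not is_vision_available():')
# returns 'torch_and_vision'; lines that do not match the pattern return None.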
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase__ = f.readlines()
lowercase__ = 0
while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase__ = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowercase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
lowercase__ = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
lowercase__ = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowercase__ = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
lowercase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowercase__ = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowercase__ = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
lowercase__ = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(''', ''' )
lowercase__ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
lowercase__ = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(''', ''' )
lowercase__ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowercase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase__ = []
while (
line_index < len(SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowercase__ = lines[line_index]
lowercase__ = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase__ = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowercase__ = lines[line_index]
lowercase__ = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def find_duplicates(SCREAMING_SNAKE_CASE ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase__ = []
for key in import_dict_objects.keys():
lowercase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}' )
lowercase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase__ = '''base imports''' if key == '''none''' else f'{key} backend'
errors.append(f'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f' {a} in _import_structure but not in TYPE_HINT.' )
return errors
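# Example of the checks above (editor's note): for
# import_dict_objects = {'none': ['MyModel', 'MyModel']} and
# type_hint_objects = {'none': ['MyModel']}, analyze_results reports
# "Duplicate _import_structure definitions for: ['MyModel']" and nothing else,
# since the de-duplicated sets on both sides agree.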
def _a ( ):
"""simple docstring"""
lowercase__ = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''__init__.py''' )
lowercase__ = parse_init(SCREAMING_SNAKE_CASE )
if objects is not None:
lowercase__ = analyze_results(*SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('''\n'''.join(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE ) )
def _a ( ):
"""simple docstring"""
lowercase__ = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowercase__ = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) )
lowercase__ = short_path.replace(os.path.sep , '''.''' )
submodules.append(SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
lowercase__ = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) )
lowercase__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE )
return submodules
lowerCAmelCase = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def _a ( ):
"""simple docstring"""
from transformers.utils import direct_transformers_import
lowercase__ = direct_transformers_import(SCREAMING_SNAKE_CASE )
lowercase__ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentially re-)add them.
with open(os.path.join(SCREAMING_SNAKE_CASE , '''__init__.py''' ) , '''r''' ) as f:
lowercase__ = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE ) ) )
lowercase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = '''\n'''.join(f'- {module}' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
f'{list_of_modules}\n'
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 43 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
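# Illustrative only: a minimal, self-contained sketch of the sentence-level GLEU
# described in _DESCRIPTION above (the minimum of n-gram precision and recall).
# The metric class below delegates to nltk's gleu_score.corpus_gleu instead;
# `_sentence_gleu` is a hypothetical helper added purely for clarity.
from collections import Counter
def _sentence_gleu(hypothesis, reference, min_len=1, max_len=4):
    def ngrams(tokens):
        counts = Counter()
        for n in range(min_len, max_len + 1):
            for i in range(len(tokens) - n + 1):
                counts[tuple(tokens[i : i + n])] += 1
        return counts
    hyp_counts, ref_counts = ngrams(hypothesis), ngrams(reference)
    matching = sum((hyp_counts & ref_counts).values())  # clipped n-gram overlap
    precision = matching / max(sum(hyp_counts.values()), 1)
    recall = matching / max(sum(ref_counts.values()), 1)
    return min(precision, recall)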
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
}
| 43 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
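# Standard transformers lazy-import pattern: the mapping below lists what each
# submodule exports, and the _LazyModule registered at the bottom resolves those
# names only on first attribute access, so importing the package stays cheap and
# torch is only required once a model class is actually touched.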
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
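# Each test class below pairs the common UNetBlockTesterMixin (imported above)
# with unittest.TestCase and pins the first few values of one block type's
# output to a hard-coded slice, so any numerical drift in the down/mid/up block
# implementations is caught immediately.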
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = DownBlockaD # noqa F405
_lowercase : Dict = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Tuple = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : str = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405
_lowercase : int = '''down'''
@property
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : str = '''mid'''
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : str = '''mid'''
@property
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405
_lowercase : List[Any] = '''up'''
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = CrossAttnUpBlockaD # noqa F405
_lowercase : List[str] = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
_lowercase : Dict = '''up'''
@property
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnUpBlockaD # noqa F405
_lowercase : Optional[Any] = '''up'''
@property
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = SkipUpBlockaD # noqa F405
_lowercase : Optional[int] = '''up'''
@property
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnSkipUpBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = UpDecoderBlockaD # noqa F405
_lowercase : Tuple = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(UpperCamelCase_ )
| 43 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
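# Conversion flow: build a BeitConfig matching the checkpoint, rename the original
# DiT parameter names to the HF BEiT layout, split the fused qkv projection, load
# the weights, sanity-check the logits on a COCO test image, then save (and
# optionally push) the model and image processor.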
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
"""simple docstring"""
lowercase__ = '''backbone.''' if is_semantic else ''''''
lowercase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', '''beit.embeddings.cls_token'''),
(f'{prefix}patch_embed.proj.weight', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'{prefix}patch_embed.proj.bias', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'{prefix}pos_embed', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase__ = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
lowercase__ = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
lowercase__ = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
lowercase__ = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
lowercase__ = in_proj_weight[
: config.hidden_size, :
]
lowercase__ = q_bias
lowercase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase__ = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
lowercase__ = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
lowercase__ = gamma_a
lowercase__ = gamma_a
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
"""simple docstring"""
lowercase__ = False if '''rvlcdip''' in checkpoint_url else True
lowercase__ = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE , use_mask_token=SCREAMING_SNAKE_CASE )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase__ = 10_24
lowercase__ = 40_96
lowercase__ = 24
lowercase__ = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase__ = 16
lowercase__ = '''huggingface/label-files'''
lowercase__ = '''rvlcdip-id2label.json'''
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model''']
lowercase__ = create_rename_keys(SCREAMING_SNAKE_CASE , has_lm_head=SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , has_lm_head=SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowercase__ = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image
lowercase__ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
lowercase__ = encoding['''pixel_values''']
lowercase__ = model(SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
# verify logits
lowercase__ = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
if has_lm_head:
lowercase__ = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
lowercase__ = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
lowerCAmelCase = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 43 |
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = set()
# Replace all the whitespace in our sentence
lowercase__ = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE ) == 26
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = [False] * 26
for char in input_str:
if char.islower():
lowercase__ = True
elif char.isupper():
lowercase__ = True
return all(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
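# Of the three variants, the set-comprehension one is fastest: it makes a single
# pass and does the per-character work in C-level set operations, which the
# timeit figures recorded in the comments below bear out.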
def benchmark( ):
    """simple docstring"""
    from timeit import timeit
    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup ) )
    print(timeit('''is_pangram_faster()''' , setup=setup ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 43 | 1 |
from __future__ import annotations
from typing import Any
class _a :
    def __init__( self: int , num_of_nodes: int ) -> None:
        """simple docstring"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge( self: Tuple , u_node: int , v_node: int , weight: int ) -> None:
        """simple docstring"""
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self: Tuple , u_node: int ) -> int:
        """simple docstring"""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self: str , u_node: int ) -> None:
        """simple docstring"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self: Dict , component_size: list[int] , u_node: int , v_node: int ) -> None:
        """simple docstring"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self: Tuple ) -> None:
        """simple docstring"""
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'The total weight of the minimal spanning tree is: {mst_weight}' )
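# Borůvka's algorithm: on every pass each component picks its cheapest outgoing
# edge and all chosen edges are merged at once. The component count at least
# halves per pass, so there are O(log V) passes and O(E log V) edge scans total.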
def _a ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences , padding_value , padding_side , sequence_length ):
    """simple docstring"""
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
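# For example: padding_tensor([[1, 2], [1]], -1, "right", 3)
# returns [[1, 2, -1], [1, -1, -1]].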
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ord(SCREAMING_SNAKE_CASE )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
lowercase__ = unicodedata.category(SCREAMING_SNAKE_CASE )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : PreTrainedTokenizerBase
_lowercase : Union[bool, str, PaddingStrategy] = True
_lowercase : Optional[int] = None
_lowercase : Optional[int] = None
_lowercase : int = -100
_lowercase : str = "pt"
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ) -> List[Any]:
"""simple docstring"""
import torch
lowercase__ = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase__ = self.tokenizer.pad(
UpperCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
lowercase__ = torch.tensor(batch['''entity_ids'''] ).shape[1]
lowercase__ = self.tokenizer.padding_side
if padding_side == "right":
lowercase__ = [
list(UpperCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) for label in labels
]
else:
lowercase__ = [
[self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) + list(UpperCamelCase_ ) for label in labels
]
lowercase__ = [feature['''ner_tags'''] for feature in features]
lowercase__ = padding_tensor(UpperCamelCase_ , -1 , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = [feature['''original_entity_spans'''] for feature in features]
lowercase__ = padding_tensor(UpperCamelCase_ , (-1, -1) , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = {k: torch.tensor(UpperCamelCase_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 43 | 1 |
def nand_gate( input_1 , input_2 ):
    """simple docstring"""
    return int((input_1, input_2).count(0 ) != 0 )
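# NAND is the negation of AND: the output is 0 only when both inputs are 1,
# which is exactly what counting zeros in the input pair checks.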
def _a ( ):
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 43 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
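# This Trainer subclass defers metric computation: compute_metrics is temporarily
# unset so the shared evaluation loop only collects generated predictions, then
# post_process_function turns them into final answers and the metrics are
# computed on those, with every key prefixed by metric_key_prefix.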
class _a ( UpperCamelCase__ ):
def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase__ = gen_kwargs
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase_ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
| 43 | 1 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences , padding_value , padding_side , sequence_length ):
    """simple docstring"""
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
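# For example: padding_tensor([[1, 2], [1]], -1, "right", 3)
# returns [[1, 2, -1], [1, -1, -1]].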
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ord(SCREAMING_SNAKE_CASE )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
lowercase__ = unicodedata.category(SCREAMING_SNAKE_CASE )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : PreTrainedTokenizerBase
_lowercase : Union[bool, str, PaddingStrategy] = True
_lowercase : Optional[int] = None
_lowercase : Optional[int] = None
_lowercase : int = -100
_lowercase : str = "pt"
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ) -> List[Any]:
"""simple docstring"""
import torch
lowercase__ = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase__ = self.tokenizer.pad(
UpperCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
lowercase__ = torch.tensor(batch['''entity_ids'''] ).shape[1]
lowercase__ = self.tokenizer.padding_side
if padding_side == "right":
lowercase__ = [
list(UpperCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) for label in labels
]
else:
lowercase__ = [
[self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) + list(UpperCamelCase_ ) for label in labels
]
lowercase__ = [feature['''ner_tags'''] for feature in features]
lowercase__ = padding_tensor(UpperCamelCase_ , -1 , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = [feature['''original_entity_spans'''] for feature in features]
lowercase__ = padding_tensor(UpperCamelCase_ , (-1, -1) , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = {k: torch.tensor(UpperCamelCase_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 43 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' )
lowercase__ = json.loads(open(SCREAMING_SNAKE_CASE ).read() )
if not params:
raise ValueError(
f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith('''.pt''' ):
lowercase__ = args.output + '''.pt'''
lowercase__ = OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase__ = tf.train.load_checkpoint(args.tf_model_dir )
lowercase__ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase__ = reader.get_tensor(SCREAMING_SNAKE_CASE ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase__ = 8
lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/moe''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/mlp''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p1/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/ln''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/att''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/o/kernel''' ):
lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/an''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase__ = '''model.%s.weight''' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
if key_name.startswith('''model/wte''' ):
lowercase__ = '''lm_head.weight'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/wob''' ):
lowercase__ = '''final_logits_bias'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense/kernel":
lowercase__ = '''model.last_project.weight'''
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense_1/bias":
lowercase__ = '''model.last_project.bias'''
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
torch.save(SCREAMING_SNAKE_CASE , args.output )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
lowerCAmelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 43 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase ):
def __init__( self: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: bool = True , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: int = 32 , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 255 , UpperCamelCase_: bool = True , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] , UpperCamelCase_: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] , UpperCamelCase_: bool = True , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Union[str, Any]=30 , UpperCamelCase_: Optional[int]=400 , UpperCamelCase_: List[str]=3 , ) -> Tuple:
"""simple docstring"""
lowercase__ = parent
lowercase__ = do_resize
lowercase__ = size if size is not None else {'''shortest_edge''': 288}
lowercase__ = size_divisor
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = do_center_crop
lowercase__ = image_mean
lowercase__ = image_std
lowercase__ = do_pad
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = min_resolution
lowercase__ = max_resolution
def lowerCamelCase_ ( self: Tuple ) -> str:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any]=False ) -> str:
"""simple docstring"""
if not batched:
lowercase__ = self.size['''shortest_edge''']
lowercase__ = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
lowercase__ , lowercase__ = image.size
else:
lowercase__ , lowercase__ = image.shape[1], image.shape[2]
lowercase__ = size / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
lowercase__ , lowercase__ = size, scale * w
else:
lowercase__ , lowercase__ = scale * h, size
lowercase__ = int((1_333 / 800) * size )
if max(UpperCamelCase_ , UpperCamelCase_ ) > max_size:
lowercase__ = max_size / max(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = newh * scale
lowercase__ = neww * scale
lowercase__ , lowercase__ = int(newh + 0.5 ), int(neww + 0.5 )
lowercase__ , lowercase__ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
lowercase__ = []
for image in image_inputs:
lowercase__ , lowercase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase__ = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
lowercase__ = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
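# get_expected_values mirrors the processor's resize rule: scale so the shortest
# edge reaches `size`, cap the longest edge at 1333/800 of it, then floor both
# dimensions to a multiple of size_divisor.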
@require_torch
@require_vision
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[Any] = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: int ) -> Tuple:
"""simple docstring"""
lowercase__ = BridgeTowerImageProcessingTester(self )
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size_divisor''' ) )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 43 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
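# A set collapses duplicates, so the lengths match exactly when every element is
# distinct: O(n) expected time at the cost of O(n) extra space.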
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w ( h , w , scale_factor=8 ):
    """simple docstring"""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
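# Worked example (added note): with the default scale_factor=8 each side is
# first rounded up to a whole multiple of scale_factor**2 = 64 and then
# divided by scale_factor, so h = w = 768 gives 768 // 64 = 12 with no
# remainder and the function returns (96, 96); h = 750 also rounds up to 12
# and maps to the same 96-unit latent side.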
class _a ( UpperCamelCase__ ):
def __init__( self: str , UpperCamelCase_: MultilingualCLIP , UpperCamelCase_: XLMRobertaTokenizer , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: Union[DDIMScheduler, DDPMScheduler] , UpperCamelCase_: VQModel , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
lowercase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: str , UpperCamelCase_: List[str] ) -> List[str]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int]=None , ) -> List[str]:
"""simple docstring"""
lowercase__ = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
lowercase__ = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , truncation=UpperCamelCase_ , max_length=77 , return_attention_mask=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors='''pt''' , )
lowercase__ = text_inputs.input_ids
lowercase__ = self.tokenizer(UpperCamelCase_ , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
lowercase__ = text_input_ids.to(UpperCamelCase_ )
lowercase__ = text_inputs.attention_mask.to(UpperCamelCase_ )
lowercase__ , lowercase__ = self.text_encoder(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowercase__ = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = text_encoder_hidden_states.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = text_mask.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(UpperCamelCase_ ) is not type(UpperCamelCase_ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase_ )} !='
f' {type(UpperCamelCase_ )}.' )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = [negative_prompt]
elif batch_size != len(UpperCamelCase_ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase_ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=77 , truncation=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors='''pt''' , )
lowercase__ = uncond_input.input_ids.to(UpperCamelCase_ )
lowercase__ = uncond_input.attention_mask.to(UpperCamelCase_ )
lowercase__ , lowercase__ = self.text_encoder(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = negative_prompt_embeds.shape[1]
lowercase__ = negative_prompt_embeds.repeat(1 , UpperCamelCase_ )
lowercase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ )
lowercase__ = uncond_text_encoder_hidden_states.shape[1]
lowercase__ = uncond_text_encoder_hidden_states.repeat(1 , UpperCamelCase_ , 1 )
lowercase__ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
lowercase__ = uncond_text_mask.repeat_interleave(UpperCamelCase_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase__ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int=0 ) -> List[str]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int=0 ) -> str:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase__ , lowercase__ = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
if self.safety_checker is not None:
lowercase__ , lowercase__ = cpu_offload_with_hook(self.safety_checker , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase_ ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Optional[Union[str, List[str]]] = None , UpperCamelCase_: int = 512 , UpperCamelCase_: int = 512 , UpperCamelCase_: int = 100 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Tuple:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ , lowercase__ , lowercase__ = self._encode_prompt(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = torch.cat(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.unet.config.in_channels
lowercase__ , lowercase__ = get_new_h_w(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
lowercase__ = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ , lowercase__ = variance_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , ).prev_sample
# post-processing
lowercase__ = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
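# Added note (illustrative): swapping sys.modules[__name__] for a _LazyModule
# defers the heavy torch/TensorFlow submodule imports above until an attribute
# such as ConvBertModel is first accessed, while the TYPE_CHECKING branch
# keeps static type checkers and IDEs accurate.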
from collections.abc import Callable
import numpy as np
def _a ( ode_func , ya , xa , step_size , x_end ):
    """simple docstring"""
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
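# Added usage sketch (illustrative only, not part of the original module):
# forward Euler applied to dy/dx = y from x = 0 to x = 1 with step 0.01; the
# method is first order, so the endpoint error shrinks roughly linearly with
# the step size.
if __name__ == "__main__":
    approx = _a(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(f"euler: {approx[-1]:.5f}, exact: {np.exp(1.0):.5f}")  # ~2.70481 vs ~2.71828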
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting'''
lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = num_samples * [init_image]
lowercase__ = num_samples * [mask_image]
lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = pipeline(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ )
lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig ( UpperCamelCase__ ):
_lowercase : str = '''new-model'''
if is_tf_available():
    class TFNewModel ( UpperCamelCase__ ):
_lowercase : int = NewModelConfig
@require_tf
class _a ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = '''bert-base-cased'''
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Tuple ) -> str:
"""simple docstring"""
lowercase__ = '''bert-base-cased'''
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase_ )
lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase_ , output_loading_info=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[str] ) -> int:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase_ )
lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase_ , output_loading_info=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ )
lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ , output_loading_info=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Any ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@slow
@require_tensorflow_probability
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCamelCase_ )
lowercase__ , lowercase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
UpperCamelCase_ , output_loading_info=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 14_410 )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 14_410 )
def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = copy.deepcopy(model.config )
lowercase__ = ['''FunnelBaseModel''']
lowercase__ = TFAutoModel.from_config(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCamelCase_ )
lowercase__ = TFAutoModel.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
try:
AutoConfig.register('''new-model''' , UpperCamelCase_ )
lowercase__ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(UpperCamelCase_ ):
auto_class.register(UpperCamelCase_ , UpperCamelCase_ )
auto_class.register(UpperCamelCase_ , UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
auto_class.register(UpperCamelCase_ , UpperCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase__ = BertModelTester(self ).get_config()
lowercase__ = NewModelConfig(**tiny_config.to_dict() )
lowercase__ = auto_class.from_config(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCamelCase_ )
lowercase__ = auto_class.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowercase__ = TFAutoModel.from_pretrained('''bert-base''' )
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase__ = TFAutoModel.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def lowerCamelCase_ ( self: Any ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase_ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(UpperCamelCase_ , '''Use `from_pt=True` to load this model''' ):
lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def lowerCamelCase_ ( self: str ) -> List[Any]:
"""simple docstring"""
lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowercase__ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
lowercase__ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
from __future__ import annotations
import math
def minimax ( depth , node_index , is_max , scores , height ):
    """simple docstring"""
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main ( ):
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print('''Optimal value : ''' , end='''''' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
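# Worked trace (added note): the eight scores form a complete tree of height
# log2(8) = 3. Depth-2 maxima are 90, 33, 65 and 34423, depth-1 minima are 33
# and 65, and the maximizing root picks 65, so main() prints
# "Optimal value : 65".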
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase = 16
lowerCAmelCase = 32
def get_dataloaders ( accelerator , batch_size = 16 ):
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
lowercase__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function ( config , args ):
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , SCREAMING_SNAKE_CASE ) == "1":
lowercase__ = 2
# New Code #
lowercase__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=SCREAMING_SNAKE_CASE )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config['''lr''']
lowercase__ = int(config['''num_epochs'''] )
lowercase__ = int(config['''seed'''] )
lowercase__ = int(config['''batch_size'''] )
lowercase__ = evaluate.load('''glue''' , '''mrpc''' )
set_seed(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
lowercase__ = model(**SCREAMING_SNAKE_CASE )
lowercase__ = output.loss
accelerator.backward(SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , SCREAMING_SNAKE_CASE )
def main ( ):
"""simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=SCREAMING_SNAKE_CASE , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
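# Added note (illustrative, not part of the original script): with
# gradient_accumulation_steps = k, accelerator.accumulate(model) defers the
# optimizer step and gradient reset to every k-th batch, so the effective
# batch size is roughly per-device batch size * number of processes * k.
# A rough manual equivalent (hypothetical names) would look like:
#
#     for step, batch in enumerate(train_dataloader):
#         loss = model(**batch).loss / k
#         loss.backward()
#         if (step + 1) % k == 0:
#             optimizer.step()
#             optimizer.zero_grad()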
class Node :
    def __init__( self , val ):
        """simple docstring"""
        self.val = val
        self.left = None
        self.right = None
    def insert ( self , val ):
        """simple docstring"""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder ( root , res ):
    """simple docstring"""
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort ( arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
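# Added usage sketch (illustrative only, not part of the original module):
# equal keys match neither branch of the inner if/elif in insert(), so
# duplicates are silently dropped; expected cost is O(n log n) on random
# input, degrading to O(n^2) when the input is already sorted.
if __name__ == "__main__":
    print(tree_sort([3, 1, 3, 2]))  # -> [1, 2, 3]; the repeated 3 is dropped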
from torch import nn
def _a ( act_fn ):
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'Unsupported activation function: {act_fn}' )
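# Added usage sketch (illustrative only, not part of the original module):
# the helper maps config strings to torch activation modules and fails fast
# on unknown names.
if __name__ == "__main__":
    import torch
    silu = _a("silu")
    print(silu(torch.tensor([-1.0, 0.0, 1.0])))  # SiLU(x) = x * sigmoid(x)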
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode ( word ):
    """simple docstring"""
    encoded = ''''''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
    return encoded
def decode ( coded ):
    """simple docstring"""
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
    decoded = ''''''
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
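# Added round-trip sketch (illustrative only, not part of the original
# module): every letter maps to a fixed five-symbol A/B code and spaces are
# preserved, so decode() walks each coded word five characters at a time.
if __name__ == "__main__":
    secret = encode("hello world")
    assert decode(secret) == "hello world"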
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
lowerCAmelCase = logging.getLogger(__name__)
lowerCAmelCase = tf.data.AUTOTUNE
def parse_args ( ):
"""simple docstring"""
    parser = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=SCREAMING_SNAKE_CASE , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=SCREAMING_SNAKE_CASE , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=SCREAMING_SNAKE_CASE , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=SCREAMING_SNAKE_CASE , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=SCREAMING_SNAKE_CASE , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=SCREAMING_SNAKE_CASE , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=SCREAMING_SNAKE_CASE , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=SCREAMING_SNAKE_CASE , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=SCREAMING_SNAKE_CASE , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=SCREAMING_SNAKE_CASE , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=SCREAMING_SNAKE_CASE , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=SCREAMING_SNAKE_CASE , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=SCREAMING_SNAKE_CASE , default=5_12 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=SCREAMING_SNAKE_CASE , default=0.15 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=SCREAMING_SNAKE_CASE , help='''Model ID to upload to on the Hugging Face Hub.''' )
    args = parser.parse_args()
return args
def initialize_tpu ( args ):
"""simple docstring"""
try:
if args.tpu_name:
lowercase__ = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase__ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(SCREAMING_SNAKE_CASE )
tf.tpu.experimental.initialize_tpu_system(SCREAMING_SNAKE_CASE )
return tpu
def count_samples ( file_list ):
"""simple docstring"""
lowercase__ = 0
for file in file_list:
lowercase__ = file.split('''/''' )[-1]
lowercase__ = re.search(R'''-\d+-(\d+)\.tfrecord''' , SCREAMING_SNAKE_CASE ).group(1 )
lowercase__ = int(SCREAMING_SNAKE_CASE )
num_samples += sample_count
return num_samples
def prepare_dataset ( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
"""simple docstring"""
lowercase__ = count_samples(SCREAMING_SNAKE_CASE )
lowercase__ = tf.data.Dataset.from_tensor_slices(SCREAMING_SNAKE_CASE )
if shuffle:
lowercase__ = dataset.shuffle(len(SCREAMING_SNAKE_CASE ) )
lowercase__ = tf.data.TFRecordDataset(SCREAMING_SNAKE_CASE , num_parallel_reads=SCREAMING_SNAKE_CASE )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase__ = dataset.apply(tf.data.experimental.assert_cardinality(SCREAMING_SNAKE_CASE ) )
lowercase__ = dataset.map(SCREAMING_SNAKE_CASE , num_parallel_calls=SCREAMING_SNAKE_CASE )
if shuffle:
assert shuffle_buffer_size is not None
lowercase__ = dataset.shuffle(args.shuffle_buffer_size )
lowercase__ = dataset.batch(SCREAMING_SNAKE_CASE , drop_remainder=SCREAMING_SNAKE_CASE )
lowercase__ = dataset.map(SCREAMING_SNAKE_CASE , num_parallel_calls=SCREAMING_SNAKE_CASE )
lowercase__ = dataset.prefetch(SCREAMING_SNAKE_CASE )
return dataset
def main ( args ):
"""simple docstring"""
if not args.no_tpu:
lowercase__ = initialize_tpu(SCREAMING_SNAKE_CASE )
lowercase__ = tf.distribute.TPUStrategy(SCREAMING_SNAKE_CASE )
else:
lowercase__ = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
lowercase__ = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase__ = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase__ = tokenizer.vocab_size
lowercase__ = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
lowercase__ = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
lowercase__ = count_samples(SCREAMING_SNAKE_CASE )
lowercase__ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase__ = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase__ = TFAutoModelForMaskedLM.from_config(SCREAMING_SNAKE_CASE )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase__ , lowercase__ = create_optimizer(
num_train_steps=SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=SCREAMING_SNAKE_CASE , metrics=['''accuracy'''] )
def decode_fn(SCREAMING_SNAKE_CASE ):
lowercase__ = {
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase__ = DataCollatorForLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
def mask_with_collator(SCREAMING_SNAKE_CASE ):
# TF really needs an isin() function
lowercase__ = (
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
lowercase__ , lowercase__ = data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=SCREAMING_SNAKE_CASE , )
return batch
lowercase__ = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase__ = prepare_dataset(
SCREAMING_SNAKE_CASE , decode_fn=SCREAMING_SNAKE_CASE , mask_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase__ = prepare_dataset(
SCREAMING_SNAKE_CASE , decode_fn=SCREAMING_SNAKE_CASE , mask_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , )
lowercase__ = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=SCREAMING_SNAKE_CASE ) )
model.fit(
SCREAMING_SNAKE_CASE , validation_data=SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=SCREAMING_SNAKE_CASE , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
import numpy as np
def _a ( vector ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
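# Added usage sketch (illustrative only, not part of the original module):
# np.exp broadcasts, so the function accepts scalars and arrays alike, and
# sigmoid(-x) == 1 - sigmoid(x).
if __name__ == "__main__":
    print(_a(np.array([-1.0, 0.0, 1.0])))  # ~[0.26894 0.5 0.73106]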
def base16_encode ( data ):
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode ( data ):
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
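# Added round-trip sketch (illustrative only, not part of the original
# module): encoding emits two uppercase hex digits per input byte and
# decoding pairs them back up.
if __name__ == "__main__":
    encoded = base16_encode(b"Hello")
    assert encoded == "48656C6C6F"
    assert base16_decode(encoded) == b"Hello"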
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
lowerCAmelCase = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = monolingual_vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__ = {}
lowercase__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = cnt
cnt += 1
with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowercase__ = line.strip().split()[0]
lowercase__ = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = len(self.fairseq_tokens_to_ids )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(UpperCamelCase_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
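# Illustrative sketch: a standalone, readably-named rendition of the special-token
# bookkeeping in the __init__ tail above, assuming BartPho-style semantics (special
# tokens take the first ids, then the monolingual vocab file is appended in order).
# The helper name and the token/vocab strings below are made-up examples.
def build_fairseq_vocab(special_tokens, vocab_lines):
    token_to_id = {}
    cnt = 0
    for token in special_tokens:
        if token not in token_to_id:
            token_to_id[token] = cnt
            cnt += 1
    for line in vocab_lines:
        token = line.strip().split()[0]
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
    return token_to_id, {v: k for k, v in token_to_id.items()}

_tok2id, _id2tok = build_fairseq_vocab(["<s>", "<pad>", "</s>", "<unk>"], ["xin 100", "chao 42"])
assert _tok2id["<s>"] == 0 and _id2tok[4] == "xin"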
| 43 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for attribute in key.split('''.''' ):
lowercase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowercase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
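# Illustrative sketch of the dotted-key traversal used above: each checkpoint key is
# resolved to a (sub)module attribute one segment at a time. The toy module below is
# a stand-in, not a real fairseq/transformers model.
def get_by_dotted_key(root, key):
    pointer = root
    for attribute in key.split("."):
        pointer = getattr(pointer, attribute)
    return pointer

_toy = torch.nn.Sequential(torch.nn.Linear(2, 2))
assert get_by_dotted_key(_toy, "0.weight").shape == (2, 2)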
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
lowercase__ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowercase__ = '''weight_g'''
elif "weight_v" in name:
lowercase__ = '''weight_v'''
elif "weight" in name:
lowercase__ = '''weight'''
elif "bias" in name:
lowercase__ = '''bias'''
else:
lowercase__ = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'Unused weights: {unused_weights}' )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = full_name.split('''conv_layers.''' )[-1]
lowercase__ = name.split('''.''' )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
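    # type_id == 2 appears to cover the norm inside a conv block: with layer norm every
    # conv layer carries one, with group norm only layer 0 does (mirroring the wav2vec2
    # conversion scripts).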
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowercase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = SEWConfig()
if is_finetuned:
lowercase__ = model.wav_encoder.wav_model.cfg
else:
lowercase__ = model.cfg
lowercase__ = fs_config.conv_bias
lowercase__ = eval(fs_config.conv_feature_layers )
lowercase__ = [x[0] for x in conv_layers]
lowercase__ = [x[1] for x in conv_layers]
lowercase__ = [x[2] for x in conv_layers]
lowercase__ = '''gelu'''
lowercase__ = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
lowercase__ = 0.0
lowercase__ = fs_config.activation_fn.name
lowercase__ = fs_config.encoder_embed_dim
lowercase__ = 0.02
lowercase__ = fs_config.encoder_ffn_embed_dim
lowercase__ = 1E-5
lowercase__ = fs_config.encoder_layerdrop
lowercase__ = fs_config.encoder_attention_heads
lowercase__ = fs_config.conv_pos_groups
lowercase__ = fs_config.conv_pos
lowercase__ = len(SCREAMING_SNAKE_CASE )
lowercase__ = fs_config.encoder_layers
lowercase__ = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
lowercase__ = model.cfg
lowercase__ = fs_config.final_dropout
lowercase__ = fs_config.layerdrop
lowercase__ = fs_config.activation_dropout
lowercase__ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
lowercase__ = fs_config.attention_dropout
lowercase__ = fs_config.dropout_input
lowercase__ = fs_config.dropout
lowercase__ = fs_config.mask_channel_length
lowercase__ = fs_config.mask_channel_prob
lowercase__ = fs_config.mask_length
lowercase__ = fs_config.mask_prob
lowercase__ = '''Wav2Vec2FeatureExtractor'''
lowercase__ = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
if is_finetuned:
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
lowercase__ = SEWConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
lowercase__ = convert_config(model[0] , SCREAMING_SNAKE_CASE )
lowercase__ = model[0].eval()
lowercase__ = True if config.feat_extract_norm == '''layer''' else False
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
if is_finetuned:
if dict_path:
lowercase__ = Dictionary.load(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ = target_dict.pad_index
lowercase__ = target_dict.bos_index
lowercase__ = target_dict.pad_index
lowercase__ = target_dict.bos_index
lowercase__ = target_dict.eos_index
lowercase__ = len(target_dict.symbols )
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
lowercase__ = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , )
lowercase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ = SEWForCTC(SCREAMING_SNAKE_CASE )
else:
lowercase__ = SEWModel(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
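    # Example invocation (hypothetical file names/paths; the fairseq ``sew_asapp``
    # package must be importable for the import near the top of this script):
    #   python convert_sew_checkpoint.py \
    #       --checkpoint_path ./sew_tiny_100k.pt \
    #       --pytorch_dump_folder_path ./sew-tiny-100k-hf
    # Pass --is_finetuned together with --dict_path ./dict.ltr.txt for a CTC model.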
| 43 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = original_name.split('''.''' )[0]
lowercase__ = key.split('''.''' )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] )
lowercase__ = orig_block_num - offset
lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
return key
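# Illustrative sketch of the renaming-with-offset scheme above, with readable names;
# the key below is a made-up PoolFormer-style example.
def demo_replace_key_with_offset(key, offset, original_name, new_name):
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    return key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )

assert (
    demo_replace_key_with_offset("network.2.1.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
    == "network.block.1.1.output.conv1.weight"
)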
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = OrderedDict()
lowercase__ , lowercase__ = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
lowercase__ = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
lowercase__ = key[: key.find('''proj''' )]
lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' )
lowercase__ = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
lowercase__ = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' )
if "norm2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
lowercase__ = key.replace('''head''' , '''classifier''' )
lowercase__ = value
return new_state_dict
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = PoolFormerConfig()
# set attributes based on model_name
lowercase__ = '''huggingface/label-files'''
lowercase__ = model_name[-3:]
lowercase__ = 10_00
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = (1, 10_00)
# set config attributes
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
if size == "s12":
lowercase__ = [2, 2, 6, 2]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s24":
lowercase__ = [4, 4, 12, 4]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.9
elif size == "m36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
elif size == "m48":
lowercase__ = [8, 8, 24, 8]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
else:
raise ValueError(f'Size {size} not supported' )
# load image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
# Prepare image
lowercase__ = prepare_img()
lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) )
# rename keys
lowercase__ = rename_keys(SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
lowercase__ = model(SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 43 | 1 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
@staticmethod
def lowerCamelCase_ ( *UpperCamelCase_: Any , **UpperCamelCase_: Union[str, Any] ) -> int:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
@require_torch
def lowerCamelCase_ ( self: int ) -> Dict:
"""simple docstring"""
lowercase__ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ = image_classifier(UpperCamelCase_ , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that rounding error dominates, so the
        # label ordering is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(UpperCamelCase_ ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
lowercase__ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , [
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
] , )
@require_tf
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ = image_classifier(UpperCamelCase_ , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
lowercase__ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , [
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )},
],
] , )
@slow
@require_torch
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ = image_classifier(UpperCamelCase_ , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowercase__ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def lowerCamelCase_ ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ = image_classifier(UpperCamelCase_ , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowercase__ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
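# Minimal usage sketch of the pipeline exercised by the slow tests above (requires
# network access to download openai/clip-vit-base-patch32; scores are indicative):
#
#   from transformers import pipeline
#   from PIL import Image
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   print(classifier(image, candidate_labels=["cat", "plane", "remote"]))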
| 43 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase = logging.getLogger()
def _a ( ):
"""simple docstring"""
lowercase__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase__ = parser.parse_args()
return args.f
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , '''r''' ) as f:
lowercase__ = json.load(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'can\'t find {path}' )
return results
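# Illustrative sketch of the contract behind the helper above: each *_no_trainer
# example script dumps its final metrics to `all_results.json`, which the tests read
# back. The metric value below is made up.
def _demo_all_results(tmp_dir):
    with open(os.path.join(tmp_dir, "all_results.json"), "w") as f:
        json.dump({"eval_accuracy": 0.8}, f)
    with open(os.path.join(tmp_dir, "all_results.json"), "r") as f:
        return json.load(f)["eval_accuracy"]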
def _a ( ):
"""simple docstring"""
lowercase__ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( UpperCamelCase__ ):
@classmethod
def lowerCamelCase_ ( cls: int ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
            # Skipping: there are not enough batches to train the model, and it would also need drop_last=True to work.
return
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = 7 if get_gpu_count() > 1 else 2
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase_ )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
        # The base model scores 25%.
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
| 43 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = f'WER: {wer_result}\nCER: {cer_result}'
print(SCREAMING_SNAKE_CASE )
with open(f'{dataset_id}_eval_results.txt' , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = f'log_{dataset_id}_predictions.txt'
lowercase__ = f'log_{dataset_id}_targets.txt'
with open(SCREAMING_SNAKE_CASE , '''w''' ) as p, open(SCREAMING_SNAKE_CASE , '''w''' ) as t:
# mapping function to write output
def write_to_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
p.write(f'{i}' + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f'{i}' + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(SCREAMING_SNAKE_CASE , with_indices=SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(SCREAMING_SNAKE_CASE , '''''' , text.lower() )
    # In addition, we normalize the target text, e.g. removing newline characters and
    # collapsing repeated whitespace. Note that the order of the patterns matters here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(SCREAMING_SNAKE_CASE ) )
return text
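# Illustrative sketch of the normalisation above on a made-up sentence, using an
# abbreviated ignore set: strip punctuation, lowercase, collapse whitespace/newlines.
def _demo_normalize(text):
    text = re.sub(r'[,?.!\-;:"]', "", text.lower())
    for t in ["\n\n", "\n", "  ", "  "]:
        text = " ".join(text.split(t))
    return text

assert _demo_normalize("Hello, World!\nSecond   line.") == "hello world second line"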
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=SCREAMING_SNAKE_CASE )
    # for testing: uncomment the next line to only process the first few examples
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=SCREAMING_SNAKE_CASE ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(SCREAMING_SNAKE_CASE ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(SCREAMING_SNAKE_CASE , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
lowerCAmelCase = parser.parse_args()
main(args)
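    # Example invocation (hypothetical model/dataset identifiers and script name):
    #   python eval.py --model_id someone/wav2vec2-large-xx \
    #       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs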
| 43 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = 'T5Config'
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''mt5'''
_lowercase : str = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
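# Usage sketch for these thin TF wrappers (standard public mT5 checkpoint; requires
# TensorFlow and network access):
#
#   from transformers import AutoTokenizer, TFMT5ForConditionalGeneration
#
#   tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#   model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
#   inputs = tokenizer("Hello world", return_tensors="tf")
#   outputs = model.generate(**inputs)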
| 43 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
lowercase__ = MaskFormerConfig(backbone_config=SCREAMING_SNAKE_CASE )
lowercase__ = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
lowercase__ = 8_47
lowercase__ = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
lowercase__ = 1_50
lowercase__ = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
lowercase__ = 1_71
lowercase__ = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
lowercase__ = 1_33
lowercase__ = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
lowercase__ = 19
lowercase__ = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
lowercase__ = 65
lowercase__ = '''mapillary-vistas-id2label.json'''
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
return config
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') )
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = dct.pop(SCREAMING_SNAKE_CASE )
lowercase__ = val
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowercase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
lowercase__ = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:dim, :]
lowercase__ = in_proj_bias[: dim]
lowercase__ = in_proj_weight[
dim : dim * 2, :
]
lowercase__ = in_proj_bias[
dim : dim * 2
]
lowercase__ = in_proj_weight[
-dim :, :
]
lowercase__ = in_proj_bias[-dim :]
# fmt: on
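# Illustrative sketch of the fused-QKV split performed above: one (3*dim, dim)
# in-projection matrix is cut into equal query/key/value blocks (toy dimensions).
def _demo_qkv_split(dim=4):
    in_proj_weight = torch.randn(3 * dim, dim)
    q = in_proj_weight[:dim, :]
    k = in_proj_weight[dim : 2 * dim, :]
    v = in_proj_weight[-dim:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)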
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
lowercase__ = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[: hidden_size, :]
lowercase__ = in_proj_bias[:config.hidden_size]
lowercase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
lowercase__ = in_proj_bias[hidden_size : hidden_size * 2]
lowercase__ = in_proj_weight[-hidden_size :, :]
lowercase__ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
lowercase__ = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[: hidden_size, :]
lowercase__ = in_proj_bias[:config.hidden_size]
lowercase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
lowercase__ = in_proj_bias[hidden_size : hidden_size * 2]
lowercase__ = in_proj_weight[-hidden_size :, :]
lowercase__ = in_proj_bias[-hidden_size :]
# fmt: on
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
lowercase__ = get_maskformer_config(SCREAMING_SNAKE_CASE )
# load original state_dict
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
lowercase__ = pickle.load(SCREAMING_SNAKE_CASE )
lowercase__ = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
lowercase__ = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
read_in_swin_q_k_v(SCREAMING_SNAKE_CASE , config.backbone_config )
read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# update to torch tensors
for key, value in state_dict.items():
lowercase__ = torch.from_numpy(SCREAMING_SNAKE_CASE )
# load 🤗 model
lowercase__ = MaskFormerForInstanceSegmentation(SCREAMING_SNAKE_CASE )
model.eval()
for name, param in model.named_parameters():
print(SCREAMING_SNAKE_CASE , param.shape )
lowercase__ , lowercase__ = model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(SCREAMING_SNAKE_CASE ) == 0, f'Unexpected keys: {unexpected_keys}'
# verify results
lowercase__ = prepare_img()
if "vistas" in model_name:
lowercase__ = 65
elif "cityscapes" in model_name:
lowercase__ = 6_55_35
else:
lowercase__ = 2_55
lowercase__ = True if '''ade''' in model_name else False
lowercase__ = MaskFormerImageProcessor(ignore_index=SCREAMING_SNAKE_CASE , reduce_labels=SCREAMING_SNAKE_CASE )
lowercase__ = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
lowercase__ = model(**SCREAMING_SNAKE_CASE )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
lowercase__ = torch.tensor(
[[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(f'nielsr/{model_name}' )
image_processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
        help='Path to the original state dict (.pkl file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 43 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 43 | 1 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : str = FunnelTokenizer
_lowercase : Optional[int] = FunnelTokenizerFast
_lowercase : int = True
_lowercase : Union[str, Any] = True
def lowerCamelCase_ ( self: Any ) -> str:
"""simple docstring"""
super().setUp()
lowercase__ = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowerCamelCase_ ( self: Union[str, Any] , **UpperCamelCase_: List[str] ) -> Tuple:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] , **UpperCamelCase_: List[str] ) -> str:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = '''UNwant\u00E9d,running'''
lowercase__ = '''unwanted, running'''
return input_text, output_text
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer_class(self.vocab_file )
lowercase__ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [7, 4, 5, 10, 8, 9] )
def lowerCamelCase_ ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
lowercase__ = tokenizer('''UNwant\u00E9d,running''' )
lowercase__ = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
lowercase__ = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 43 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
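        # ViTMAE draws a random patch mask on every forward pass; reusing this explicit noise for both calls below keeps their outputs comparable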
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['''last_hidden_state'''].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['''logits'''].numpy()
lowercase__ = 0
            lowercase__ = np.amax(np.abs(out_1 - out_2 ) )
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
| 43 | 1 |
from __future__ import annotations
from fractions import Fraction
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (
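        # e.g. num=49, den=98: the shared digit 9 cancels, and the naive 4/8 equals 49/98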
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
lowercase__ = 11
lowercase__ = int('''1''' + '''0''' * digit_len )
for num in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
solutions.append(f'{num}/{den}' )
den += 1
num += 1
lowercase__ = 10
return solutions
def _a ( SCREAMING_SNAKE_CASE = 2 ):
"""simple docstring"""
lowercase__ = 1.0
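    # the four non-trivial two-digit fractions multiply to 1/100, so the lowest-terms denominator returned here is 100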
for fraction in fraction_list(SCREAMING_SNAKE_CASE ):
lowercase__ = Fraction(SCREAMING_SNAKE_CASE )
result *= frac.denominator / frac.numerator
return int(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(solution())
| 43 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if (len(SCREAMING_SNAKE_CASE ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE ): # This function is recursive
"""simple docstring"""
lowercase__ = len(SCREAMING_SNAKE_CASE )
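    # brute-force recursion: returns the longest non-decreasing subsequence of the input array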
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ = array[0]
lowercase__ = False
lowercase__ = 1
lowercase__ = []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ = True
lowercase__ = [element for element in array[i:] if element >= array[i]]
lowercase__ = longest_subsequence(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > len(SCREAMING_SNAKE_CASE ):
lowercase__ = temp_array
else:
i += 1
lowercase__ = [element for element in array[1:] if element >= pivot]
lowercase__ = [pivot, *longest_subsequence(SCREAMING_SNAKE_CASE )]
if len(SCREAMING_SNAKE_CASE ) > len(SCREAMING_SNAKE_CASE ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = position
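    # the eight L-shaped moves a knight can make from (y, x)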
lowercase__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowercase__ = []
for position in positions:
lowercase__ , lowercase__ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE )
return permissible_positions
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if is_complete(SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowercase__ , lowercase__ = position
if board[y][x] == 0:
lowercase__ = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ):
return True
lowercase__ = 0
return False
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowercase__ = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
lowercase__ = 0
    lowercase__ = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class _a ( UpperCamelCase__ ):
_lowercase : Any = '''big_bird'''
def __init__( self: List[str] , UpperCamelCase_: List[Any]=50_358 , UpperCamelCase_: Dict=768 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Optional[int]=3_072 , UpperCamelCase_: Tuple="gelu_new" , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=4_096 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: int=1E-1_2 , UpperCamelCase_: Dict=True , UpperCamelCase_: Tuple=0 , UpperCamelCase_: Any=1 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: List[str]=66 , UpperCamelCase_: Any="block_sparse" , UpperCamelCase_: int=True , UpperCamelCase_: Dict=False , UpperCamelCase_: List[Any]=64 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: Union[str, Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , sep_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = type_vocab_size
lowercase__ = layer_norm_eps
lowercase__ = use_cache
lowercase__ = rescale_embeddings
lowercase__ = attention_type
lowercase__ = use_bias
lowercase__ = block_size
lowercase__ = num_random_blocks
lowercase__ = classifier_dropout
class _a ( UpperCamelCase__ ):
@property
def lowerCamelCase_ ( self: str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 43 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
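        # classifier-free guidance concatenates an unconditional batch with the conditional one, so one forward pass serves both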
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowercase__ , lowercase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(UpperCamelCase_ ):
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 43 | 1 |
lowerCAmelCase = 0 # The first color of the flag.
lowerCAmelCase = 1 # The second color of the flag.
lowerCAmelCase = 2 # The third color of the flag.
lowerCAmelCase = (red, white, blue)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not sequence:
return []
if len(SCREAMING_SNAKE_CASE ) == 1:
return list(SCREAMING_SNAKE_CASE )
lowercase__ = 0
lowercase__ = len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ = 0
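    # invariant: sequence[:low] holds the first color, sequence[low:mid] the second, sequence[high + 1:] the third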
while mid <= high:
if sequence[mid] == colors[0]:
lowercase__ , lowercase__ = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
lowercase__ , lowercase__ = sequence[high], sequence[mid]
high -= 1
else:
lowercase__ = f'The elements inside the sequence must contains only {colors} values'
raise ValueError(SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = input('Enter numbers separated by commas:\n').strip()
lowerCAmelCase = [int(item.strip()) for item in user_input.split(',')]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 43 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
}
| 43 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan
lowerCAmelCase = 6_378_137.0
lowerCAmelCase = 6_356_752.314_245
lowerCAmelCase = 6_378_137
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = (AXIS_A - AXIS_B) / AXIS_A
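    # flattening of the ellipsoid (the constants above match WGS-84); used below to convert geodetic latitudes to parametric ones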
lowercase__ = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE ) ) )
lowercase__ = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE ) ) )
lowercase__ = radians(SCREAMING_SNAKE_CASE )
lowercase__ = radians(SCREAMING_SNAKE_CASE )
# Equation
lowercase__ = sin((phi_a - phi_a) / 2 )
lowercase__ = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
lowercase__ = sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE ) * cos(SCREAMING_SNAKE_CASE ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = DownBlockaD # noqa F405
_lowercase : Dict = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Tuple = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : str = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405
_lowercase : int = '''down'''
@property
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : str = '''mid'''
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
# NOTE: the tester mixin `UpperCamelCase__` is assumed to be defined earlier in
# this file; class, attribute, and method names below are restored from the
# upstream diffusers block tests so the classes stop shadowing one another.
class UNetMidBlockaDSimpleCrossAttnTests(UpperCamelCase__, unittest.TestCase):
    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = UpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 43 | 1 |
def is_power_of_two(number: int) -> bool:
    """simple docstring"""
    if number < 0:
        raise ValueError("number must not be negative")
    # A power of two has exactly one set bit, so n & (n - 1) clears it to 0.
    # Note that 0 also satisfies this check and is accepted here, matching the
    # upstream implementation this snippet derives from.
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
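    # Added demo (not part of the original module): a few sample checks.
    for candidate in (1, 2, 3, 16, 17):
        print(candidate, is_power_of_two(candidate))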
| 43 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """simple docstring"""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """simple docstring"""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """simple docstring"""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
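    # Added sanity check (not part of the original module): all three
    # implementations should agree on pangram and non-pangram inputs.
    for sentence in ("The quick brown fox jumps over the lazy dog", "Hello world"):
        assert is_pangram(sentence) == is_pangram_faster(sentence) == is_pangram_fastest(sentence)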
| 43 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """simple docstring"""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """simple docstring"""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """simple docstring"""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
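    # Added demo (assumed entry point, not in the original file): run each
    # estimator with a modest sample count; accuracy improves with iterations.
    pi_estimator(10_000)
    area_under_line_estimator_check(1_000)
    pi_estimator_using_area_under_curve(1_000)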
| 43 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """simple docstring"""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            # left padding: write the (possibly truncated) tensor at the end
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()


def is_punctuation(char):
    """simple docstring"""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


# Class and method names restored from the LUKE token-classification example
# (an assumption); DataCollatorMixin dispatches __call__ to torch_call when
# return_tensors is "pt".
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        """simple docstring"""
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors would fail here: the labels are not yet all the same length
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
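# Hedged usage sketch (added; `tokenizer` and `features` are placeholders, not
# part of this module): the collator pads token ids through `tokenizer.pad`,
# then pads the LUKE-style label/span fields to the entity sequence length
# with `padding_tensor` before converting everything to int64 tensors.
#
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator(features)  # features: list of dicts with "ner_tags" etc.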
| 43 | 1 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """simple docstring"""
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
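# Added micro-example (not in the original file): heapq is a min-heap, so
# ranks are stored negated and the highest-degree vertex pops first.
#
#   h = []
#   heapq.heappush(h, [-3, "a"]); heapq.heappush(h, [-1, "b"])
#   heapq.heappop(h)  # -> [-3, "a"]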
| 43 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    # Class name restored from the upstream question-answering example (an assumption).
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        """simple docstring"""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        """simple docstring"""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
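# Hedged usage sketch (added; the dataset and function names are placeholders):
# the subclass is constructed like a regular Seq2SeqTrainer, with the raw
# examples and a post-processing hook that turns generated ids into
# metric-ready predictions.
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       eval_examples=raw_eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)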
| 43 | 1 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 43 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """simple docstring"""
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase__ = 8
lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/moe''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/mlp''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p1/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/ln''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/att''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/o/kernel''' ):
lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/an''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase__ = '''model.%s.weight''' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
if key_name.startswith('''model/wte''' ):
lowercase__ = '''lm_head.weight'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/wob''' ):
lowercase__ = '''final_logits_bias'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense/kernel":
lowercase__ = '''model.last_project.weight'''
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense_1/bias":
lowercase__ = '''model.last_project.bias'''
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
torch.save(SCREAMING_SNAKE_CASE , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
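# Added usage note (script name and paths are illustrative assumptions):
#
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_pytorch.pt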
| 43 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
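# Added usage note (script and repo names are illustrative assumptions):
#
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name my-codeparrot-init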
| 43 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """simple docstring"""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """simple docstring"""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """simple docstring"""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    # NOTE: several literal values below (defaults, types, required flags) were
    # unrecoverable from the corrupted source; None/str/True are plausible
    # reconstructions, not verified against the upstream script.
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
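# Added note (an observation, not a behavior change): main() defines an
# argument parser but then drives the pipeline with hard-coded literals;
# wire `parser.parse_args()` through the calls above to make the CLI options
# effective.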
| 43 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43 | 1 |
import re
def split_input(str_: str) -> list:
    """simple docstring"""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """simple docstring"""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """simple docstring"""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """simple docstring"""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """simple docstring"""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """simple docstring"""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """simple docstring"""
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
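    # Added demo (not in the original file): exercise each converter.
    print(to_pascal_case("one two 31235three4four"))
    print(to_camel_case("one two 31235three4four"))
    print(to_snake_case("one two 31235three4four", True))
    print(to_kebab_case("one two 31235three4four", False))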
| 43 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    # Class and test names are plausible reconstructions; only `_a` survived in
    # the corrupted source, and the pipeline-call argument order below assumes
    # the (prompt_ids, mask, masked_image, ...) signature.
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 43 | 1 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
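    # Added note: with 8 leaf scores the tree height is log2(8) = 3; the
    # maximizer chooses at even depths and the minimizer at odd depths.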
| 43 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """simple docstring"""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
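# Hedged usage sketch (added; `spark_df` is a placeholder pyspark DataFrame).
# In practice this reader typically backs `datasets.Dataset.from_spark`:
#
#   reader = SparkDatasetReader(spark_df, streaming=False)
#   ds = reader.read()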
| 43 |
class Node:
    def __init__(self, val):
        """simple docstring"""
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """simple docstring"""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """simple docstring"""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """simple docstring"""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
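    # Added demo (not in the original file): already-sorted input is the BST's
    # worst case (the tree degenerates to a linked list), but the output is
    # still correctly sorted.
    print(tree_sort([1, 2, 3, 4, 5]))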
| 43 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 43 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """simple docstring"""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """simple docstring"""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
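    # Added demo (not in the original file): round-trip a message.
    message = "hello world"
    assert decode(encode(message)) == message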
| 43 | 1 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 43 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
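    # Added demo: sigmoid squashes values into the open interval (0, 1).
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))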
| 43 | 1 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n    >>> comet_metric = datasets.load_metric(\'comet\')\n    >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\')  # you can also choose which model to use\n    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """simple docstring"""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """simple docstring"""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 43 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Any = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        """Build a BARTpho-style tokenizer from a SentencePiece model and a reduced fairseq vocab."""
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        """Drop the unpicklable SentencePiece object and keep its serialized proto."""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        """Restore the SentencePiece processor from the serialized proto."""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """Add the special tokens: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """Mark special tokens with 1 and sequence tokens with 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """Token type ids are not used by this model, so an all-zero mask is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ) -> int:
        """Size of the reduced fairseq vocabulary."""
        return len(self.fairseq_ids_to_tokens )
    def get_vocab( self ):
        """Return the token-to-id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ) -> List[str]:
        """Tokenize a string with the underlying SentencePiece model."""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """Convert a token (str) to an id using the reduced vocab, falling back to <unk>."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index ):
        """Convert an index (int) to a token (str) using the reduced vocab."""
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens ):
        """Join sub-tokens and restore spaces from the SentencePiece underline."""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """Write the SentencePiece model and the monolingual dict to `save_directory`."""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'{str(token )} \n' )
        return out_vocab_file, out_monolingual_vocab_file
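# A minimal usage sketch for the tokenizer above, assuming this is the BARTpho
# tokenizer shipped with transformers and that the published
# "vinai/bartpho-syllable" checkpoint (from the pretrained maps above) is reachable.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
    encoded = tokenizer("Chúng tôi là những nghiên cứu viên.")
    print(encoded["input_ids"])
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))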
| 43 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
    """Convert a Mesh-TensorFlow GPTSAN checkpoint into a PyTorch state dict."""
    parameter_file = os.path.join(args.tf_model_dir , '''parameters.json''' )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
    if not args.output.endswith('''.pt''' ):
        args.output = args.output + '''.pt'''
    new_state = OrderedDict()
    with tf.device('''/CPU:0''' ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float16 )
            if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
                continue
            if key_name.startswith('''pasts/''' ):
                if key_name.startswith('''pasts/mlp''' ):
                    player = int(key_name[9] )
                elif key_name.startswith('''pasts/out''' ):
                    player = 8
                name = '''model.sqout.%d.weight''' % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/moe''' ):
                player = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/switch_gating/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/softmlp/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
                    nlayer = key_name[-9:-7]
                    for i in range(16 ):
                        name = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0] ).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/mlp''' ):
                player = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/p1/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/p1/bias''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/p2/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/p2/bias''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/ln''' ):
                player = int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    name = '''model.blocks.%d.feed_forward.norm.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/g''' ):
                    name = '''model.blocks.%d.feed_forward.norm.weight''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/att''' ):
                player = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/qkv/kernel''' ):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
                    new_state[name] = torch.tensor(state_q )
                    name = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
                    new_state[name] = torch.tensor(state_k )
                    name = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
                    new_state[name] = torch.tensor(state_v )
                elif key_name.endswith('''/o/kernel''' ):
                    name = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/an''' ):
                player = int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    name = '''model.blocks.%d.self_attn.norm.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/g''' ):
                    name = '''model.blocks.%d.self_attn.norm.weight''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif (
                key_name.startswith('''model/wte''' )
                or key_name.startswith('''model/wpe''' )
                or key_name.startswith('''model/ete''' )
            ):
                nlayer = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
                    key_name[-3:]
                ]
                name = '''model.%s.weight''' % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state )
                if key_name.startswith('''model/wte''' ):
                    name = '''lm_head.weight'''
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/wob''' ):
                name = '''final_logits_bias'''
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1) )
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense/kernel":
                name = '''model.last_project.weight'''
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense_1/bias":
                name = '''model.last_project.bias'''
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state )
    torch.save(new_state , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
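    # Optional sanity check, a hedged sketch rather than part of the original
    # script: the saved file is a plain state dict, so a few tensor names and
    # shapes can be printed right after the conversion finishes.
    converted = torch.load(args.output, map_location="cpu")
    for tensor_name, tensor in list(converted.items())[:5]:
        print(tensor_name, tuple(tensor.shape))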
| 43 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset( key , offset , original_name , new_name ):
    """Replaces the key by subtracting the offset from the original block number."""
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
    return key
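# Illustrative behavior of `replace_key_with_offset` on a hypothetical key
# (stage block 2, layer 0, re-based by a patch-embedding offset of 1):
#   replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   -> "poolformer.encoder.block.1.0.output.conv1.weight"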
def rename_keys( state_dict ):
    """Rename original PoolFormer state-dict keys to the Hugging Face layout."""
    new_state_dict = OrderedDict()
    total_embed_found , patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''' , '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace , f'patch_embeddings.{total_embed_found}.' )
            key = key.replace('''proj''' , '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc1''' , '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc2''' , '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm1''' , '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm2''' , '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_1''' , '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_2''' , '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''' , '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
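# End-to-end example of the renaming above on a hypothetical original key
# (offset 0, i.e. no bias-only projection seen yet):
#   "network.0.0.mlp.fc1.weight"
#   -> "poolformer.encoder.0.0.mlp.fc1.weight"                  (network prefix swapped)
#   -> "poolformer.encoder.block.0.0.output.conv1.weight"       (block re-based, mlp.fc1 renamed)
# while "head.weight" simply becomes "classifier.weight".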
def prepare_img( ):
    """Load a COCO test image used to verify the converted model."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ):
    """Convert an original PoolFormer checkpoint to the Hugging Face format and verify it."""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 1000
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values
    logger.info(f'Converting model {model_name}...' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(f'Size {size} not supported' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
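    # Hedged follow-up sketch, not part of the original script: reload the
    # converted checkpoint and run one prediction. The dump folder is whatever
    # path was passed above; the label mapping was set from imagenet-1k.
    model = PoolFormerForImageClassification.from_pretrained(args.pytorch_dump_folder_path)
    image_processor = PoolFormerImageProcessor.from_pretrained(args.pytorch_dump_folder_path)
    inputs = image_processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])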
| 43 | 1 |