| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string, 82–54.1k chars | int64, 0–699 | string, 111–35.6k chars | int64, 0–699 | int64, 0 or 1 |
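The rows of the table follow inline, with `| … |` cell delimiters between the code and context columns. As a minimal sketch, assuming the table is available as a parquet export (the file name `train.parquet` below is a placeholder, not part of this dump), it can be loaded with the Hugging Face `datasets` library:

from datasets import load_dataset

# Hypothetical path: point this at the actual parquet export of the table below.
ds = load_dataset("parquet", data_files="train.parquet", split="train")
row = ds[0]
print(row["code"][:200])  # a source snippet (identifiers are obfuscated)
print(row["code_codestyle"], row["style_context_codestyle"])  # style ids in [0, 699]
print(row["label"])  # 0 or 1, per the schema above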
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
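# Load the original YAML config file and flatten nested keys into dot-separated attributes on a Namespace.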
def lowerCamelCase__ ( _a):
print("Loading config file...")
def flatten_yaml_as_dict(_a , _a="" , _a="."):
SCREAMING_SNAKE_CASE : Dict = []
for k, v in d.items():
SCREAMING_SNAKE_CASE : List[str] = parent_key + sep + k if parent_key else k
if isinstance(_a , collections.abc.MutableMapping):
items.extend(flatten_yaml_as_dict(_a , _a , sep=_a).items())
else:
items.append((new_key, v))
return dict(_a)
SCREAMING_SNAKE_CASE : Optional[int] = argparse.Namespace()
with open(_a , "r") as yaml_file:
try:
SCREAMING_SNAKE_CASE : Union[str, Any] = yaml.load(_a , Loader=yaml.FullLoader)
SCREAMING_SNAKE_CASE : Dict = flatten_yaml_as_dict(_a)
for k, v in flat_cfg.items():
setattr(_a , _a , _a)
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(_a , str(_a)))
return config
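# Build the MobileViTV2 config for the given task: ImageNet classification or ADE20K/VOC semantic segmentation.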
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Dict = MobileViTVaConfig()
SCREAMING_SNAKE_CASE : int = False
# dataset
if task_name.startswith("imagenet1k_"):
SCREAMING_SNAKE_CASE : str = 1000
if int(task_name.strip().split("_")[-1]) == 384:
SCREAMING_SNAKE_CASE : Dict = 384
else:
SCREAMING_SNAKE_CASE : Tuple = 256
SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_"):
SCREAMING_SNAKE_CASE : List[Any] = 21000
if int(task_name.strip().split("_")[-1]) == 384:
SCREAMING_SNAKE_CASE : List[Any] = 384
else:
SCREAMING_SNAKE_CASE : Optional[Any] = 256
SCREAMING_SNAKE_CASE : List[Any] = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_"):
SCREAMING_SNAKE_CASE : str = 151
SCREAMING_SNAKE_CASE : Union[str, Any] = 512
SCREAMING_SNAKE_CASE : str = "ade20k-id2label.json"
SCREAMING_SNAKE_CASE : Union[str, Any] = True
elif task_name.startswith("voc_"):
SCREAMING_SNAKE_CASE : Dict = 21
SCREAMING_SNAKE_CASE : Optional[int] = 512
SCREAMING_SNAKE_CASE : Dict = "pascal-voc-id2label.json"
SCREAMING_SNAKE_CASE : Tuple = True
# orig_config
SCREAMING_SNAKE_CASE : Optional[Any] = load_orig_config_file(_a)
assert getattr(_a , "model.classification.name" , -1) == "mobilevit_v2", "Invalid model"
SCREAMING_SNAKE_CASE : Optional[int] = getattr(_a , "model.classification.mitv2.width_multiplier" , 1.0)
assert (
getattr(_a , "model.classification.mitv2.attn_norm_layer" , -1) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
SCREAMING_SNAKE_CASE : List[Any] = getattr(_a , "model.classification.activation.name" , "swish")
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
SCREAMING_SNAKE_CASE : List[Any] = getattr(_a , "model.segmentation.output_stride" , 16)
if "_deeplabv3" in task_name:
SCREAMING_SNAKE_CASE : int = getattr(_a , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36])
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(_a , "model.segmentation.deeplabv3.aspp_out_channels" , 512)
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_a , "model.segmentation.deeplabv3.aspp_dropout" , 0.1)
# id2label
SCREAMING_SNAKE_CASE : int = "huggingface/label-files"
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_a , _a , repo_type="dataset") , "r"))
SCREAMING_SNAKE_CASE : Optional[int] = {int(_a): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[Any] = idalabel
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : Tuple = dct.pop(_a)
SCREAMING_SNAKE_CASE : int = val
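# Build the list of (old_key, new_key) pairs mapping original checkpoint names to Hugging Face MobileViTV2 names.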
def lowerCamelCase__ ( _a , _a=False):
if base_model:
SCREAMING_SNAKE_CASE : int = ""
else:
SCREAMING_SNAKE_CASE : Optional[int] = "mobilevitv2."
SCREAMING_SNAKE_CASE : int = []
for k in state_dict.keys():
if k[:8] == "encoder.":
SCREAMING_SNAKE_CASE : Optional[Any] = k[8:]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = k
if ".block." in k:
SCREAMING_SNAKE_CASE : List[Any] = k_new.replace(".block." , ".")
if ".conv." in k:
SCREAMING_SNAKE_CASE : Tuple = k_new.replace(".conv." , ".convolution.")
if ".norm." in k:
SCREAMING_SNAKE_CASE : Dict = k_new.replace(".norm." , ".normalization.")
if "conv_1." in k:
SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace("conv_1." , f"{model_prefix}conv_stem.")
for i in [1, 2]:
if f"layer_{i}." in k:
SCREAMING_SNAKE_CASE : Dict = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer.")
if ".exp_1x1." in k:
SCREAMING_SNAKE_CASE : Union[str, Any] = k_new.replace(".exp_1x1." , ".expand_1x1.")
if ".red_1x1." in k:
SCREAMING_SNAKE_CASE : Dict = k_new.replace(".red_1x1." , ".reduce_1x1.")
for i in [3, 4, 5]:
if f"layer_{i}.0." in k:
SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
if f"layer_{i}.1.local_rep.0." in k:
SCREAMING_SNAKE_CASE : str = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
if f"layer_{i}.1.local_rep.1." in k:
SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
for i in [3, 4, 5]:
if i == 3:
SCREAMING_SNAKE_CASE : Dict = [0, 1]
elif i == 4:
SCREAMING_SNAKE_CASE : str = [0, 1, 2, 3]
elif i == 5:
SCREAMING_SNAKE_CASE : List[Any] = [0, 1, 2]
for j in j_in:
if f"layer_{i}.1.global_rep.{j}." in k:
SCREAMING_SNAKE_CASE : List[str] = k_new.replace(
f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.")
if f"layer_{i}.1.global_rep.{j+1}." in k:
SCREAMING_SNAKE_CASE : List[Any] = k_new.replace(
f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm.")
if f"layer_{i}.1.conv_proj." in k:
SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection.")
if "pre_norm_attn.0." in k:
SCREAMING_SNAKE_CASE : Tuple = k_new.replace("pre_norm_attn.0." , "layernorm_before.")
if "pre_norm_attn.1." in k:
SCREAMING_SNAKE_CASE : int = k_new.replace("pre_norm_attn.1." , "attention.")
if "pre_norm_ffn.0." in k:
SCREAMING_SNAKE_CASE : Any = k_new.replace("pre_norm_ffn.0." , "layernorm_after.")
if "pre_norm_ffn.1." in k:
SCREAMING_SNAKE_CASE : List[Any] = k_new.replace("pre_norm_ffn.1." , "ffn.conv1.")
if "pre_norm_ffn.3." in k:
SCREAMING_SNAKE_CASE : Any = k_new.replace("pre_norm_ffn.3." , "ffn.conv2.")
if "classifier.1." in k:
SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace("classifier.1." , "classifier.")
if "seg_head." in k:
SCREAMING_SNAKE_CASE : List[Any] = k_new.replace("seg_head." , "segmentation_head.")
if ".aspp_layer." in k:
SCREAMING_SNAKE_CASE : int = k_new.replace(".aspp_layer." , ".")
if ".aspp_pool." in k:
SCREAMING_SNAKE_CASE : Tuple = k_new.replace(".aspp_pool." , ".")
rename_keys.append((k, k_new))
return rename_keys
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : List[str] = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head."):
keys_to_ignore.append(_a)
for k in keys_to_ignore:
state_dict.pop(_a , _a)
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(_a , stream=_a).raw)
return im
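# End-to-end conversion: load the original weights, rename them, load them into the HF model, and sanity-check the outputs.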
@torch.no_grad()
def lowerCamelCase__ ( _a , _a , _a , _a):
SCREAMING_SNAKE_CASE : Union[str, Any] = get_mobilevitva_config(_a , _a)
# load original state_dict
SCREAMING_SNAKE_CASE : Any = torch.load(_a , map_location="cpu")
# load huggingface model
if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
SCREAMING_SNAKE_CASE : List[str] = MobileViTVaForSemanticSegmentation(_a).eval()
SCREAMING_SNAKE_CASE : Optional[Any] = False
else:
SCREAMING_SNAKE_CASE : List[Any] = MobileViTVaForImageClassification(_a).eval()
SCREAMING_SNAKE_CASE : str = False
# remove and rename some keys when loading the original model
SCREAMING_SNAKE_CASE : Dict = checkpoint
remove_unused_keys(_a)
SCREAMING_SNAKE_CASE : Dict = create_rename_keys(_a , base_model=_a)
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_a , _a , _a)
# load modified state_dict
model.load_state_dict(_a)
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=prepare_img() , return_tensors="pt")
SCREAMING_SNAKE_CASE : List[Any] = model(**_a)
# verify classification model
if task_name.startswith("imagenet"):
SCREAMING_SNAKE_CASE : Any = outputs.logits
SCREAMING_SNAKE_CASE : Dict = logits.argmax(-1).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx])
if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
# expected_logits for base variant
SCREAMING_SNAKE_CASE : Any = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01])
assert torch.allclose(logits[0, :3] , _a , atol=1E-4)
Path(_a).mkdir(exist_ok=_a)
print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
model.save_pretrained(_a)
print(f"Saving image processor to {pytorch_dump_folder_path}")
image_processor.save_pretrained(_a)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task the MobileViTV2 model you\'d like to convert was trained on. '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
a_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
) | 25 |
from __future__ import annotations
import math
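# Max segment tree with lazy propagation: range-assignment updates and range-max queries, 1-indexed.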
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = size
# approximate the overall size of segment tree with given value
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
SCREAMING_SNAKE_CASE : Union[str, Any] = [0 for i in range(0 , 4 * size )]
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update
def __UpperCamelCase ( self : Tuple , a : int ) -> int:
"""simple docstring"""
return idx * 2
def __UpperCamelCase ( self : str , a : int ) -> int:
"""simple docstring"""
return idx * 2 + 1
def __UpperCamelCase ( self : int , a : int , a : int , a : int , a : list[int] ) -> None:
"""simple docstring"""
if left_element == right_element:
SCREAMING_SNAKE_CASE : int = a[left_element - 1]
else:
SCREAMING_SNAKE_CASE : Optional[int] = (left_element + right_element) // 2
self.build(self.left(a ) , a , a , a )
self.build(self.right(a ) , mid + 1 , a , a )
SCREAMING_SNAKE_CASE : List[Any] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : int , a : int , a : int , a : int , a : int ) -> bool:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : Any = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[str] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : List[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
SCREAMING_SNAKE_CASE : Optional[Any] = val
if left_element != right_element:
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
return True
SCREAMING_SNAKE_CASE : int = (left_element + right_element) // 2
self.update(self.left(a ) , a , a , a , a , a )
self.update(self.right(a ) , mid + 1 , a , a , a , a )
SCREAMING_SNAKE_CASE : Optional[int] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
return True
def __UpperCamelCase ( self : Dict , a : int , a : int , a : int , a : int , a : int ) -> int | float:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[Any] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
SCREAMING_SNAKE_CASE : Dict = (left_element + right_element) // 2
SCREAMING_SNAKE_CASE : Tuple = self.query(self.left(a ) , a , a , a , a )
SCREAMING_SNAKE_CASE : Tuple = self.query(self.right(a ) , mid + 1 , a , a , a )
return max(a , a )
def __str__( self : str ) -> str:
"""simple docstring"""
return str([self.query(1 , 1 , self.size , a , a ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
a_ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
a_ = 15
a_ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt) | 25 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
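# Flax DDPM scheduler: an immutable state dataclass plus the noise schedule and denoising step logic.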
@flax.struct.dataclass
class _UpperCamelCase :
'''simple docstring'''
lowerCamelCase__ =42
# setable values
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =None
@classmethod
def __UpperCamelCase ( cls : Tuple , a : CommonSchedulerState , a : jnp.ndarray , a : jnp.ndarray ) -> Dict:
"""simple docstring"""
return cls(common=a , init_noise_sigma=a , timesteps=a )
@dataclass
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =42
class _UpperCamelCase ( __A , __A ):
'''simple docstring'''
lowerCamelCase__ =[e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCamelCase__ =42
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return True
@register_to_config
def __init__( self : str , a : int = 1000 , a : float = 0.0001 , a : float = 0.02 , a : str = "linear" , a : Optional[jnp.ndarray] = None , a : str = "fixed_small" , a : bool = True , a : str = "epsilon" , a : jnp.dtype = jnp.floataa , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = dtype
def __UpperCamelCase ( self : Any , a : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
"""simple docstring"""
if common is None:
SCREAMING_SNAKE_CASE : Optional[int] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : Tuple = jnp.array(1.0 , dtype=self.dtype )
SCREAMING_SNAKE_CASE : List[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=a , init_noise_sigma=a , timesteps=a , )
def __UpperCamelCase ( self : List[Any] , a : DDPMSchedulerState , a : jnp.ndarray , a : Optional[int] = None ) -> jnp.ndarray:
"""simple docstring"""
return sample
def __UpperCamelCase ( self : Optional[int] , a : DDPMSchedulerState , a : int , a : Tuple = () ) -> DDPMSchedulerState:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_steps is a power of 3
SCREAMING_SNAKE_CASE : Dict = (jnp.arange(0 , a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=a , timesteps=a , )
def __UpperCamelCase ( self : Optional[Any] , a : DDPMSchedulerState , a : List[str] , a : Any=None , a : Dict=None ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
SCREAMING_SNAKE_CASE : int = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
SCREAMING_SNAKE_CASE : str = jnp.clip(a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE : Any = jnp.log(jnp.clip(a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
SCREAMING_SNAKE_CASE : int = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
SCREAMING_SNAKE_CASE : str = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
SCREAMING_SNAKE_CASE : Any = variance
SCREAMING_SNAKE_CASE : int = state.common.betas[t]
SCREAMING_SNAKE_CASE : Dict = (predicted_variance + 1) / 2
SCREAMING_SNAKE_CASE : List[Any] = frac * max_log + (1 - frac) * min_log
return variance
def __UpperCamelCase ( self : Any , a : DDPMSchedulerState , a : jnp.ndarray , a : int , a : jnp.ndarray , a : Optional[jax.random.KeyArray] = None , a : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = timestep
if key is None:
SCREAMING_SNAKE_CASE : str = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.split(a , sample.shape[1] , axis=1 )
else:
SCREAMING_SNAKE_CASE : int = None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE : Tuple = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE : str = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE : Union[str, Any] = model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE : Tuple = jnp.clip(a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE : int = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(a , num=1 )
SCREAMING_SNAKE_CASE : Dict = jax.random.normal(a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(a , a , predicted_variance=a ) ** 0.5) * noise
SCREAMING_SNAKE_CASE : str = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=a , state=a )
def __UpperCamelCase ( self : Union[str, Any] , a : DDPMSchedulerState , a : jnp.ndarray , a : jnp.ndarray , a : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , a , a , a )
def __UpperCamelCase ( self : str , a : DDPMSchedulerState , a : jnp.ndarray , a : jnp.ndarray , a : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , a , a , a )
def __len__( self : int ) -> Any:
"""simple docstring"""
return self.config.num_train_timesteps | 25 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
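# Slow GPU integration tests: each case compares a 3x3 slice of the 512x512 output against reference values.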
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : Optional[int] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = sd_pipe(
[prompt] , generator=a , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=a , )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 25 | 1 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
a_ = logging.get_logger('transformers.models.speecht5')
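# Key-renaming tables from the original fairseq checkpoint layout to Hugging Face SpeechT5 modules.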
a_ = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
a_ = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
a_ = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
a_ = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
a_ = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
a_ = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
a_ = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
a_ = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
a_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
a_ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a_ = []
a_ = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
a_ = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
a_ = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
a_ = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
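# Copy one tensor into the HF model at a dotted key path, checking shapes and dispatching on the weight type.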
def lowerCamelCase__ ( _a , _a , _a , _a , _a):
for attribute in key.split("."):
SCREAMING_SNAKE_CASE : Any = getattr(_a , _a)
if weight_type is not None:
SCREAMING_SNAKE_CASE : str = getattr(_a , _a).shape
else:
SCREAMING_SNAKE_CASE : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}")
if weight_type == "weight":
SCREAMING_SNAKE_CASE : List[str] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : Optional[int] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : int = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : Tuple = value
elif weight_type == "running_mean":
SCREAMING_SNAKE_CASE : Dict = value
elif weight_type == "running_var":
SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "num_batches_tracked":
SCREAMING_SNAKE_CASE : Dict = value
else:
SCREAMING_SNAKE_CASE : int = value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def lowerCamelCase__ ( _a , _a):
for key in ignore_keys:
if key.endswith(".*"):
if name.startswith(key[:-1]):
return True
elif ".*." in key:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = key.split(".*.")
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
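# Walk the fairseq state dict, rename keys via the tables above, and load the tensors into the HF model.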
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : List[str] = []
if task == "s2t":
SCREAMING_SNAKE_CASE : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder
SCREAMING_SNAKE_CASE : Optional[int] = MAPPING_S2T
SCREAMING_SNAKE_CASE : Any = IGNORE_KEYS_S2T
elif task == "t2s":
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Optional[Any] = MAPPING_T2S
SCREAMING_SNAKE_CASE : Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
SCREAMING_SNAKE_CASE : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder
SCREAMING_SNAKE_CASE : Union[str, Any] = MAPPING_S2S
SCREAMING_SNAKE_CASE : str = IGNORE_KEYS_S2S
else:
raise ValueError(f"Unsupported task: {task}")
for name, value in fairseq_dict.items():
if should_ignore(_a , _a):
logger.info(f"{name} was ignored")
continue
SCREAMING_SNAKE_CASE : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_a , _a , _a , _a , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE : Tuple = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = key.split(".*.")
if prefix in name and suffix in name:
SCREAMING_SNAKE_CASE : str = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
SCREAMING_SNAKE_CASE : int = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : Dict = name.split(_a)[0].split(".")[-2]
SCREAMING_SNAKE_CASE : Optional[int] = mapped_key.replace("*" , _a)
if "weight_g" in name:
SCREAMING_SNAKE_CASE : str = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : List[Any] = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE : Optional[int] = "bias"
elif "weight" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = "weight"
elif "running_mean" in name:
SCREAMING_SNAKE_CASE : Any = "running_mean"
elif "running_var" in name:
SCREAMING_SNAKE_CASE : Tuple = "running_var"
elif "num_batches_tracked" in name:
SCREAMING_SNAKE_CASE : Dict = "num_batches_tracked"
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
set_recursively(_a , _a , _a , _a , _a)
continue
if not is_used:
unused_weights.append(_a)
logger.warning(f"Unused weights: {unused_weights}")
def lowerCamelCase__ ( _a , _a , _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[Any] = full_name.split("conv_layers.")[-1]
SCREAMING_SNAKE_CASE : Union[str, Any] = name.split(".")
SCREAMING_SNAKE_CASE : Dict = int(items[0])
SCREAMING_SNAKE_CASE : Union[str, Any] = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
SCREAMING_SNAKE_CASE : Optional[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(_a)
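# End-to-end conversion entry point for the s2t, t2s, and s2s SpeechT5 checkpoints.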
@torch.no_grad()
def lowerCamelCase__ ( _a , _a , _a , _a=None , _a=None , _a=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE : int = SpeechTaConfig.from_pretrained(_a)
else:
SCREAMING_SNAKE_CASE : Tuple = SpeechTaConfig()
if task == "s2t":
SCREAMING_SNAKE_CASE : int = config.max_text_positions
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaForSpeechToText(_a)
elif task == "t2s":
SCREAMING_SNAKE_CASE : Any = 1876
SCREAMING_SNAKE_CASE : Dict = 600
SCREAMING_SNAKE_CASE : Tuple = config.max_speech_positions
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaForTextToSpeech(_a)
elif task == "s2s":
SCREAMING_SNAKE_CASE : Union[str, Any] = 1876
SCREAMING_SNAKE_CASE : Tuple = config.max_speech_positions
SCREAMING_SNAKE_CASE : int = SpeechTaForSpeechToSpeech(_a)
else:
raise ValueError(f"Unknown task name: {task}")
if vocab_path:
SCREAMING_SNAKE_CASE : List[str] = SpeechTaTokenizer(_a , model_max_length=config.max_text_positions)
# Mask token behaves like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken("<mask>" , lstrip=_a , rstrip=_a)
SCREAMING_SNAKE_CASE : Tuple = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token})
tokenizer.add_tokens(["<ctc_blank>"])
SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE : int = SpeechTaProcessor(tokenizer=_a , feature_extractor=_a)
processor.save_pretrained(_a)
SCREAMING_SNAKE_CASE : List[Any] = torch.load(_a)
recursively_load_weights(fairseq_checkpoint["model"] , _a , _a)
model.save_pretrained(_a)
if repo_id:
print("Pushing to the hub...")
processor.push_to_hub(_a)
model.push_to_hub(_a)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
a_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
) | 25 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
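# Nightly ONNX Runtime GPU tests for the Stable Diffusion inpainting pipeline.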
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE : Union[str, Any] = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 | 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
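# Lazy import structure: the torch-backed modules below are only imported when first accessed.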
a_ = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase__ ( _a):
return getitem, k
def lowerCamelCase__ ( _a , _a):
return setitem, k, v
def lowerCamelCase__ ( _a):
return delitem, k
def lowerCamelCase__ ( _a , _a , *_a):
try:
return fun(_a , *_a), None
except Exception as e:
return None, e
a_ = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
a_ = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
a_ = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
a_ = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
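# Replay each operation script against both HashMap and a plain dict; results and state must match.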
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Dict = HashMap(initial_block_size=4)
SCREAMING_SNAKE_CASE : List[str] = {}
for _, (fun, *args) in enumerate(_a):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = _run_operation(_a , _a , *_a)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = _run_operation(_a , _a , *_a)
assert my_res == py_res
assert str(_a) == str(_a)
assert set(_a) == set(_a)
assert len(_a) == len(_a)
assert set(my.items()) == set(py.items())
def lowerCamelCase__ ( ):
def is_public(_a) -> bool:
return not name.startswith("_")
SCREAMING_SNAKE_CASE : List[str] = {name for name in dir({}) if is_public(_a)}
SCREAMING_SNAKE_CASE : Union[str, Any] = {name for name in dir(HashMap()) if is_public(_a)}
assert dict_public_names > hash_public_names | 25 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
a_ = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
a_ = {
'ctrl': 256,
}
a_ = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
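# Return the set of adjacent symbol pairs in a word, used to pick the next BPE merge.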
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Any = set()
SCREAMING_SNAKE_CASE : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
SCREAMING_SNAKE_CASE : Optional[int] = char
SCREAMING_SNAKE_CASE : List[str] = set(_a)
return pairs
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =CONTROL_CODES
def __init__( self : List[Any] , a : int , a : str , a : Optional[Any]="<unk>" , **a : List[str] ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=a , **a )
with open(a , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE : List[Any] = json.load(a )
SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in self.encoder.items()}
with open(a , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE : Dict = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE : str = [tuple(merge.split() ) for merge in merges]
SCREAMING_SNAKE_CASE : Union[str, Any] = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE : List[str] = {}
@property
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self : List[str] , a : Union[str, Any] ) -> Any:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE : Tuple = tuple(a )
SCREAMING_SNAKE_CASE : Dict = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
SCREAMING_SNAKE_CASE : Tuple = get_pairs(a )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a , key=lambda a : self.bpe_ranks.get(a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = bigram
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Any = 0
while i < len(a ):
try:
SCREAMING_SNAKE_CASE : Tuple = word.index(a , a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE : List[Any] = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE : Tuple = tuple(a )
SCREAMING_SNAKE_CASE : str = new_word
if len(a ) == 1:
break
else:
SCREAMING_SNAKE_CASE : Dict = get_pairs(a )
SCREAMING_SNAKE_CASE : Tuple = "@@ ".join(a )
SCREAMING_SNAKE_CASE : Tuple = word[:-4]
SCREAMING_SNAKE_CASE : str = word
return word
def __UpperCamelCase ( self : Union[str, Any] , a : Dict ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : Tuple = re.findall(R"\S+\n?" , a )
for token in words:
split_tokens.extend(list(self.bpe(a ).split(" " ) ) )
return split_tokens
def __UpperCamelCase ( self : Tuple , a : Optional[Any] ) -> Tuple:
"""simple docstring"""
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Any , a : int ) -> Tuple:
"""simple docstring"""
return self.decoder.get(a , self.unk_token )
def __UpperCamelCase ( self : Dict , a : Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = " ".join(a ).replace("@@ " , "" ).strip()
return out_string
def __UpperCamelCase ( self : int , a : str , a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE : Any = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + "\n" )
SCREAMING_SNAKE_CASE : List[str] = 0
with open(a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE : Dict = token_index
writer.write(" ".join(a ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far) | 25 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure) | 25 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
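# Usage sketch (defaults as reconstructed above): the config can be instantiated
# directly and drives the ONNX export metadata:
#   config = SwinConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
#   onnx_config = SwinOnnxConfig(config)
#   list(onnx_config.inputs)  # ["pixel_values"]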
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
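    # Quick self-check (sketch): bucket sort should agree with the built-in sort
    # on random float data, including negative values.
    import random

    data = [random.uniform(-100, 100) for _ in range(1_000)]
    assert bucket_sort(data) == sorted(data)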
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
])
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
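# Example invocation (hypothetical output directory; flags are the ones defined
# by the argument parser above):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384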
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['image'])
IMAGE_VARIATION_PARAMS = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['image'])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['prompt', 'image', 'negative_prompt'])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['image', 'mask_image'])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['example_image', 'image', 'mask_image'])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['class_labels'])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['class_labels'])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['input_tokens'])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['input_tokens'])
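# Usage sketch (hypothetical test class; the attribute names follow the diffusers
# PipelineTesterMixin conventions):
#   class MyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#       params = TEXT_TO_IMAGE_PARAMS
#       batch_params = TEXT_TO_IMAGE_BATCH_PARAMS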
import math


def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
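    # Sanity check from the problem statement: for n = 10 the sum of the squares is
    # 385 and the square of the sum is 55**2 = 3025, so the difference is 2640.
    assert solution(10) == 2640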
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
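# Usage sketch (assumes a `datasets.Dataset` instance named `ds`): selecting the
# "jax" format routes row/column/batch extraction through JaxFormatter, so indexing
# returns jax.Array values instead of Python lists:
#   ds = ds.with_format("jax")
#   ds[0]        # row as a dict of jax arrays / plain scalars
#   ds["col"]    # whole column, consolidated with jnp.stack when shapes match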
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
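# How the one-line quine above works: the template string is applied to itself with
# the % operator; %r substitutes the string's own repr and %% escapes to a literal
# percent sign, so the printed output reproduces the source line exactly.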
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
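# Minimal usage sketch (model id and image URL taken from the slow tests above):
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.99)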
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
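    # Known automorphic numbers (number**2 ends in the number itself):
    # 5**2 = 25, 6**2 = 36, 76**2 = 5776, 625**2 = 390625.
    assert all(is_automorphic_number(n) for n in (0, 1, 5, 6, 25, 76, 376, 625))
    assert not is_automorphic_number(7)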
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
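# Usage sketch (checkpoint name taken from the pretrained maps above):
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer(" Hello world").input_ids  # Blenderbot expects a leading space
#   text = tokenizer.decode(ids)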
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
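# Minimal inference sketch mirroring the integration test above (the token ids are
# the ones used in that test):
#   model = DistilBertModel.from_pretrained("distilbert-base-uncased")
#   input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
#   model(input_ids).last_hidden_state.shape  # torch.Size([1, 11, 768])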
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a_ = logging.get_logger(__name__)
a_ = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='bart'
lowerCamelCase__ =['past_key_values']
lowerCamelCase__ ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : str , a : Optional[int]=5_0265 , a : Any=1024 , a : Tuple=12 , a : Optional[Any]=4096 , a : Any=16 , a : Optional[int]=12 , a : Any=4096 , a : List[Any]=16 , a : Union[str, Any]=0.0 , a : List[str]=0.0 , a : List[Any]="gelu" , a : Dict=1024 , a : Union[str, Any]=0.1 , a : Any=0.0 , a : Union[str, Any]=0.0 , a : Union[str, Any]=0.02 , a : Optional[int]=0.0 , a : Tuple=False , a : Dict=True , a : Optional[int]=3 , a : Union[str, Any]=1 , a : str=0 , a : Dict=2 , a : Any=True , a : str=2 , a : Optional[Any]=2 , **a : List[str] , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Any = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : List[str] = decoder_layers
SCREAMING_SNAKE_CASE : Dict = decoder_attention_heads
SCREAMING_SNAKE_CASE : Tuple = dropout
SCREAMING_SNAKE_CASE : int = attention_dropout
SCREAMING_SNAKE_CASE : Dict = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[Any] = init_std
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Dict = decoder_layerdrop
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=a , pad_token_id=a , bos_token_id=a , eos_token_id=a , is_encoder_decoder=a , decoder_start_token_id=a , forced_eos_token_id=a , **a , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , a ):
SCREAMING_SNAKE_CASE : List[str] = self.bos_token_id
warnings.warn(
F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"The config can simply be saved and uploaded again to be fixed." )
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : str = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Any = {0: "batch"}
SCREAMING_SNAKE_CASE : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE : Tuple = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE : Tuple = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(a , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : str = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.num_layers
for i in range(a ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE : int = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __UpperCamelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Union[str, Any] = super().outputs
else:
SCREAMING_SNAKE_CASE : str = super(a , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.num_layers
for i in range(a ):
SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE : Any = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __UpperCamelCase ( self : Optional[Any] , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a , a , a , a , a )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : str = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a , a , a , a , a )
SCREAMING_SNAKE_CASE : Dict = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = dict(**a , **a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE : Tuple = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.num_attention_heads
SCREAMING_SNAKE_CASE : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(a , a )] , dim=1 )
SCREAMING_SNAKE_CASE : Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.num_layers
SCREAMING_SNAKE_CASE : Any = min(a , a )
SCREAMING_SNAKE_CASE : Any = max(a , a ) - min_num_layers
SCREAMING_SNAKE_CASE : int = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(a ):
common_inputs["past_key_values"].append(
(
torch.zeros(a ),
torch.zeros(a ),
torch.zeros(a ),
torch.zeros(a ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(a , a ):
common_inputs["past_key_values"].append((torch.zeros(a ), torch.zeros(a )) )
return common_inputs
def __UpperCamelCase ( self : Any , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a , a , a , a , a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : Any = seqlen + 2
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = self.num_layers
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(
[common_inputs["attention_mask"], torch.ones(a , a , dtype=a )] , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = [
(torch.zeros(a ), torch.zeros(a )) for _ in range(a )
]
return common_inputs
def __UpperCamelCase ( self : Tuple , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(a )
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : str = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Tuple = dict(tokenizer(a , return_tensors=a ) )
return common_inputs
def __UpperCamelCase ( self : Optional[int] , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
a , batch_size=a , seq_length=a , is_pair=a , framework=a )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
a , batch_size=a , seq_length=a , is_pair=a , framework=a )
else:
SCREAMING_SNAKE_CASE : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a , batch_size=a , seq_length=a , is_pair=a , framework=a )
return common_inputs
def __UpperCamelCase ( self : Optional[Any] , a : Tuple , a : Tuple , a : int , a : Tuple ) -> Tuple:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[int] = super()._flatten_past_key_values_(a , a , a , a )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = super(a , self )._flatten_past_key_values_(
            a , a , a , a )
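# Illustrative usage sketch for the ONNX config above. The concrete names
# (AutoTokenizer, BartConfig, BartOnnxConfig) are the standard transformers
# ones and are assumed here, since the class names in this file are placeholders:
#     from transformers import AutoTokenizer, BartConfig
#     from transformers.models.bart.configuration_bart import BartOnnxConfig
#     from transformers.utils import TensorType
#     tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
#     onnx_config = BartOnnxConfig(BartConfig(), task="default")
#     dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)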
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Any = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" , return_dict=a )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "google/ncsnpp-celebahq-256"
SCREAMING_SNAKE_CASE : List[Any] = UNetaDModel.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Optional[Any] = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe(num_inference_steps=20 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( _a):
    # Returns True when the square of ``_a`` ends in ``_a`` itself (an automorphic number).
    number = _a
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
    doctest.testmod()
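# Runnable sanity check for the automorphic test above: 5, 6 and 76 qualify
# because 25, 36 and 5776 end in the original number, while 7 (49) does not.
if __name__ == "__main__":
    assert lowerCamelCase__(5) and lowerCamelCase__(6) and lowerCamelCase__(76)
    assert not lowerCamelCase__(7)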
def binary_multiply(_a, _b):
    # Russian-peasant (shift-and-add) multiplication of two non-negative integers.
    # The original names were mangled in the source; descriptive ones are substituted.
    a, b = _a, _b
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(_a, _b, _c):
    # Same shift-and-add scheme, but every partial sum is reduced modulo ``_c``.
    a, b, c = _a, _b, _c
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
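# Runnable check for the two helpers above; both should agree with the
# built-in operators on non-negative inputs.
if __name__ == "__main__":
    assert binary_multiply(3, 9) == 3 * 9
    assert binary_mod_multiply(3, 9, 5) == (3 * 9) % 5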
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    # Tracks the peak RSS of the current process on a busy-wait thread.
    # The class, method and function names were mangled in the source; they are
    # restored from the call sites below (``cpu_peak_tracker.start()``/``.stop()``).

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem (in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='roformer'
def __init__( self : Dict , a : Any=5_0000 , a : List[Any]=None , a : str=768 , a : str=12 , a : Tuple=12 , a : Optional[Any]=3072 , a : List[str]="gelu" , a : List[Any]=0.1 , a : Union[str, Any]=0.1 , a : Tuple=1536 , a : List[str]=2 , a : Tuple=0.02 , a : Any=1e-12 , a : Optional[int]=0 , a : Union[str, Any]=False , a : int=True , **a : str , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = rotary_value
SCREAMING_SNAKE_CASE : int = use_cache
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "sequence"}
SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
            ] )
a_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main():
    # The helper names below (encrypt_message/decrypt_message/translate_message)
    # are taken from the call sites that survived the source mangling.
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key, message):
    return translate_message(key, message, "encrypt")


def decrypt_message(key, message):
    return translate_message(key, message, "decrypt")


def translate_message(key, message, mode):
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
    main()
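# Example round trip for the cipher helpers above (arguments are (key, message)):
#     >>> ciphertext = encrypt_message("LEMON", "Attack at dawn")
#     >>> decrypt_message("LEMON", ciphertext)
#     'Attack at dawn'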
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a_ = logging.getLogger(__name__)
a_ = 'Hello world! cécé herlolip'
a_ = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : List[Any] = BertAbsConfig(
temp_dir="." , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE : Dict = torch.load(_a , lambda _a , _a: storage)
SCREAMING_SNAKE_CASE : str = AbsSummarizer(_a , torch.device("cpu") , _a)
original.eval()
SCREAMING_SNAKE_CASE : List[str] = BertAbsSummarizer(_a , torch.device("cpu"))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
SCREAMING_SNAKE_CASE : List[str] = BertTokenizer.from_pretrained("bert-base-uncased")
# prepare the model inputs
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("This is sample éàalj'-.")
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("This is sample 3 éàalj'-.")
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
SCREAMING_SNAKE_CASE : List[Any] = encoder_input_ids
SCREAMING_SNAKE_CASE : List[Any] = decoder_input_ids
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE : Optional[int] = original(_a , _a , _a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Dict = original.generator(_a)
SCREAMING_SNAKE_CASE : Any = new_model(
_a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Tuple = new_model.generator(_a)
SCREAMING_SNAKE_CASE : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : int = torch.allclose(_a , _a , atol=1E-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
a_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
    )
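# Example invocation (both paths are hypothetical):
#     python convert_bertabs_original_pytorch_checkpoint.py \
#         --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#         --pytorch_dump_folder_path ./bertabs-finetuned-cnndm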
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='beit'
def __init__( self : str , a : List[Any]=8192 , a : int=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Any=3072 , a : Union[str, Any]="gelu" , a : str=0.0 , a : Tuple=0.0 , a : Optional[Any]=0.02 , a : Optional[int]=1e-12 , a : List[str]=224 , a : Union[str, Any]=16 , a : Optional[Any]=3 , a : Union[str, Any]=False , a : int=False , a : List[Any]=False , a : Optional[Any]=False , a : List[Any]=0.1 , a : List[Any]=0.1 , a : Tuple=True , a : Tuple=[3, 5, 7, 11] , a : Optional[int]=[1, 2, 3, 6] , a : Optional[Any]=True , a : Dict=0.4 , a : Tuple=256 , a : int=1 , a : List[str]=False , a : Union[str, Any]=255 , **a : int , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = use_mask_token
SCREAMING_SNAKE_CASE : Optional[int] = use_absolute_position_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = use_relative_position_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = use_shared_relative_position_bias
SCREAMING_SNAKE_CASE : str = layer_scale_init_value
SCREAMING_SNAKE_CASE : str = drop_path_rate
SCREAMING_SNAKE_CASE : List[str] = use_mean_pooling
# decode head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE : Optional[int] = out_indices
SCREAMING_SNAKE_CASE : Dict = pool_scales
# auxiliary head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE : Optional[Any] = use_auxiliary_head
SCREAMING_SNAKE_CASE : Any = auxiliary_loss_weight
SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels
SCREAMING_SNAKE_CASE : int = auxiliary_num_convs
SCREAMING_SNAKE_CASE : List[str] = auxiliary_concat_input
SCREAMING_SNAKE_CASE : Optional[Any] = semantic_loss_ignore_index
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =version.parse('1.11' )
@property
def __UpperCamelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __UpperCamelCase ( self : str ) -> float:
"""simple docstring"""
        return 1e-4
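# Minimal instantiation sketch, assuming the standard transformers names
# (BeitConfig / BeitModel) rather than the placeholder class names above:
#     from transformers import BeitConfig, BeitModel
#     config = BeitConfig(image_size=224, patch_size=16)
#     model = BeitModel(config)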
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
a_ = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
    imgaimg.save_pretrained(args.dump_path)
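# Example invocation (the script name and dump path are hypothetical):
#     python convert_unclip_txt2img_to_image_variation.py \
#         --txt2img_unclip kakaobrain/karlo-v1-alpha \
#         --dump_path ./karlo-image-variation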
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
SCREAMING_SNAKE_CASE : int = [sys.executable] + distributed_args
        execute_subprocess_async(a , env=os.environ.copy() )
from math import pi, sqrt, tan
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values")
return 6 * side_length**2
def lowerCamelCase__ ( _a , _a , _a):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values")
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values")
return 4 * pi * radius**2
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values")
return 3 * pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values")
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase__ ( _a , _a , _a):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values")
SCREAMING_SNAKE_CASE : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values")
return 2 * pi * radius * (height + radius)
def lowerCamelCase__ ( _a , _a):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values")
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori")
return 4 * pow(_a , 2) * torus_radius * tube_radius
def lowerCamelCase__ ( _a , _a):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values")
return length * width
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values")
return side_length**2
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values")
return (base * height) / 2
def lowerCamelCase__ ( _a , _a , _a):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values")
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle")
SCREAMING_SNAKE_CASE : List[str] = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE : Optional[int] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea))
return area
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values")
return base * height
def lowerCamelCase__ ( _a , _a , _a):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values")
return 1 / 2 * (basea + basea) * height
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values")
return pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values")
return pi * radius_x * radius_y
def lowerCamelCase__ ( _a , _a):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values")
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase__ ( _a , _a):
if not isinstance(_a , _a) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
import string
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Union[str, Any] = ""
for i in sequence:
SCREAMING_SNAKE_CASE : int = ord(_a)
if 65 <= extract <= 90:
output += chr(155 - extract)
elif 97 <= extract <= 122:
output += chr(219 - extract)
else:
output += i
return output
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : int = string.ascii_letters
SCREAMING_SNAKE_CASE : Any = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(_a)] if c in letters else c for c in sequence)
def lowerCamelCase__ ( ):
from timeit import timeit
print("Running performance benchmarks...")
SCREAMING_SNAKE_CASE : Dict = "from string import printable ; from __main__ import atbash, atbash_slow"
print(f"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=_a)} seconds")
print(f"> atbash(): {timeit('atbash(printable)' , setup=_a)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'''{example} encrypted in atbash: {atbash(example)}''')
    benchmark()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='van'
def __init__( self : Optional[Any] , a : Union[str, Any]=224 , a : List[str]=3 , a : Optional[Any]=[7, 3, 3, 3] , a : Tuple=[4, 2, 2, 2] , a : Any=[64, 128, 320, 512] , a : Tuple=[3, 3, 12, 3] , a : str=[8, 8, 4, 4] , a : Any="gelu" , a : List[str]=0.02 , a : Optional[int]=1e-6 , a : Optional[int]=1e-2 , a : Dict=0.0 , a : List[Any]=0.0 , **a : Union[str, Any] , ) -> str:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_sizes
SCREAMING_SNAKE_CASE : Tuple = strides
SCREAMING_SNAKE_CASE : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE : List[str] = depths
SCREAMING_SNAKE_CASE : Optional[int] = mlp_ratios
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = layer_scale_init_value
SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
        SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
from __future__ import annotations
def lowerCamelCase__ ( _a):
    # Trial-division prime factorisation; factors come back in non-decreasing order.
    n = _a
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
    doctest.testmod()
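# Runnable sanity check for the factorisation above: factors multiply back to
# the input and are listed in non-decreasing order.
if __name__ == "__main__":
    assert lowerCamelCase__(100) == [2, 2, 5, 5]
    assert lowerCamelCase__(97) == [97]  # a prime is its own only factor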
from math import factorial, pi
def maclaurin_sin(theta, accuracy = 30):
    # Function names are restored from the demo calls below; the originals were mangled.
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta, accuracy = 30):
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
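# The reduced-angle series should match the standard library to within float
# noise at the default 30 terms:
if __name__ == "__main__":
    from math import cos, sin
    assert abs(maclaurin_sin(10) - sin(10)) < 1e-9
    assert abs(maclaurin_cos(-5) - cos(-5)) < 1e-9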
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
from __future__ import annotations
import math
class SegmentTree:
    # A max segment tree with lazy propagation for range-assign updates.
    # The class and method names are restored from the internal call sites
    # and the demo below (``SegmentTree(size)``, ``segt.build`` and friends).

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # Assigns ``val`` to every position in [a, b].
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # Returns the maximum over [a, b].
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
    print(segt)
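# With lazy propagation, each range update or query above touches O(log n)
# nodes, so the demo as a whole runs in O((n + q) log n) for q operations.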
import argparse
import struct
import unittest
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : bytes ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = data
# Initialize hash values
SCREAMING_SNAKE_CASE : Tuple = [
0x6A_09E_667,
0xBB_67A_E85,
0x3C_6EF_372,
0xA5_4FF_53A,
0x51_0E5_27F,
0x9B_056_88C,
0x1F_83D_9AB,
0x5B_E0C_D19,
]
# Initialize round constants
SCREAMING_SNAKE_CASE : str = [
0x42_8A2_F98,
0x71_374_491,
0xB5_C0F_BCF,
0xE9_B5D_BA5,
0x39_56C_25B,
0x59_F11_1F1,
0x92_3F8_2A4,
0xAB_1C5_ED5,
0xD8_07A_A98,
0x12_835_B01,
0x24_318_5BE,
0x55_0C7_DC3,
0x72_BE5_D74,
0x80_DEB_1FE,
0x9B_DC0_6A7,
0xC1_9BF_174,
0xE4_9B6_9C1,
0xEF_BE4_786,
0x0F_C19_DC6,
0x24_0CA_1CC,
0x2D_E92_C6F,
0x4A_748_4AA,
0x5C_B0A_9DC,
0x76_F98_8DA,
0x98_3E5_152,
0xA8_31C_66D,
0xB0_032_7C8,
0xBF_597_FC7,
0xC6_E00_BF3,
0xD5_A79_147,
0x06_CA6_351,
0x14_292_967,
0x27_B70_A85,
0x2E_1B2_138,
0x4D_2C6_DFC,
0x53_380_D13,
0x65_0A7_354,
0x76_6A0_ABB,
0x81_C2C_92E,
0x92_722_C85,
0xA2_BFE_8A1,
0xA8_1A6_64B,
0xC2_4B8_B70,
0xC7_6C5_1A3,
0xD1_92E_819,
0xD6_990_624,
0xF4_0E3_585,
0x10_6AA_070,
0x19_A4C_116,
0x1E_376_C08,
0x27_487_74C,
0x34_B0B_CB5,
0x39_1C0_CB3,
0x4E_D8A_A4A,
0x5B_9CC_A4F,
0x68_2E6_FF3,
0x74_8F8_2EE,
0x78_A56_36F,
0x84_C87_814,
0x8C_C70_208,
0x90_BEF_FFA,
0xA4_506_CEB,
0xBE_F9A_3F7,
0xC6_717_8F2,
]
SCREAMING_SNAKE_CASE : Tuple = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __UpperCamelCase ( a : bytes ) -> bytes:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = B"\x80" + (B"\x00" * (63 - (len(a ) + 8) % 64))
SCREAMING_SNAKE_CASE : Optional[int] = struct.pack(">Q" , (len(a ) * 8) )
return data + padding + big_endian_integer
def __UpperCamelCase ( self : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
SCREAMING_SNAKE_CASE : Any = list(struct.unpack(">16L" , a ) )
# add 48 0-ed integers
words += [0] * 48
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
SCREAMING_SNAKE_CASE : str = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100_000_000
# Compression
SCREAMING_SNAKE_CASE : List[str] = self.ror(a , 6 ) ^ self.ror(a , 11 ) ^ self.ror(a , 25 )
SCREAMING_SNAKE_CASE : Tuple = (e & f) ^ ((~e & 0xFF_FFF_FFF) & g)
SCREAMING_SNAKE_CASE : Tuple = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100_000_000
SCREAMING_SNAKE_CASE : Any = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
SCREAMING_SNAKE_CASE : List[str] = (a & b) ^ (a & c) ^ (b & c)
SCREAMING_SNAKE_CASE : Any = (sa + maj) % 0x100_000_000
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = (
g,
f,
e,
((d + tempa) % 0x100_000_000),
c,
b,
a,
((tempa + tempa) % 0x100_000_000),
)
SCREAMING_SNAKE_CASE : Optional[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
SCREAMING_SNAKE_CASE : Tuple = [
((element + mutated_hash_values[index]) % 0x100_000_000)
for index, element in enumerate(self.hashes )
]
SCREAMING_SNAKE_CASE : int = "".join([hex(a )[2:].zfill(8 ) for value in self.hashes] )
def __UpperCamelCase ( self : List[str] , a : int , a : int ) -> int:
"""simple docstring"""
return 0xFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
import hashlib
SCREAMING_SNAKE_CASE : Tuple = bytes("Test String" , "utf-8" )
        self.assertEqual(SHAaaa(a ).hash , hashlib.sha256(a ).hexdigest() )
def lowerCamelCase__ ( ):
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file")
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE : int = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb") as f:
SCREAMING_SNAKE_CASE : Optional[int] = f.read()
else:
SCREAMING_SNAKE_CASE : List[Any] = bytes(_a , "utf-8")
print(SHAaaa(_a).hash)
if __name__ == "__main__":
    main()
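# Quick sanity sketch for the class above: its hex digest should match hashlib's
# reference SHA-256 for the same input (class name as defined in this module;
# this check mirrors the unittest case).
import hashlib
msg = bytes("Test String", "utf-8")
assert SHAaaa(msg).hash == hashlib.sha256(msg).hexdigest()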
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : Optional[int] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = sd_pipe(
[prompt] , generator=a , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=a , )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
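# The three tests above share one flow; a hedged standalone sketch of it outside
# unittest (model id, scheduler name, and call arguments are taken from the tests;
# a CUDA device and downloaded weights are assumed):
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
pipe = pipe.to("cuda")
pipe.set_scheduler("sample_dpmpp_2m")
out = pipe(
    ["A painting of a squirrel eating a burger"],
    generator=torch.manual_seed(0),
    guidance_scale=7.5,
    num_inference_steps=15,
    output_type="np",
    use_karras_sigmas=True,
)
print(out.images[0].shape)  # (512, 512, 3), per the assertions above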
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , a : Optional[int] , a : Dict=7 , a : Union[str, Any]=3 , a : List[Any]=30 , a : int=400 , a : Tuple=True , a : int=None , a : Optional[int]=True , a : int=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , a : int=True , a : str=1 / 255 , a : str=True , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : List[Any] = min_resolution
SCREAMING_SNAKE_CASE : Dict = max_resolution
SCREAMING_SNAKE_CASE : List[Any] = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Tuple = do_normalize
SCREAMING_SNAKE_CASE : int = image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std
SCREAMING_SNAKE_CASE : List[str] = do_rescale
SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_pad
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCamelCase ( self : List[str] , a : Optional[int] , a : Optional[Any]=False ) -> Any:
"""simple docstring"""
if not batched:
SCREAMING_SNAKE_CASE : Tuple = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = image.size
else:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : Optional[int] = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE : Optional[int] = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE : List[str] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE : Dict = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE : Any = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE : Optional[int] = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE : int = []
for image in image_inputs:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE : Optional[int] = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE : Optional[int] = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =ConditionalDetrImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , a )
SCREAMING_SNAKE_CASE : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.image_processor_tester.get_expected_values(a , batched=a )
SCREAMING_SNAKE_CASE : Any = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE : Tuple = json.loads(f.read() )
SCREAMING_SNAKE_CASE : int = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE : Union[str, Any] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(images=a , annotations=a , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a )
SCREAMING_SNAKE_CASE : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) )
# verify boxes
SCREAMING_SNAKE_CASE : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) )
# verify orig_size
SCREAMING_SNAKE_CASE : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) )
# verify size
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : str = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE : List[str] = ConditionalDetrImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(images=a , annotations=a , masks_path=a , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a )
SCREAMING_SNAKE_CASE : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) )
# verify boxes
SCREAMING_SNAKE_CASE : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) )
# verify masks
SCREAMING_SNAKE_CASE : Dict = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , a )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) )
# verify size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) ) | 25 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE : Union[str, Any] = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
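# Hedged sketch of the provider / session-options setup the tests above rely on
# (onnxruntime with a CUDA build is assumed; enable_mem_pattern is my reading of
# the boolean flag set on the options object in the gpu_options property):
import onnxruntime as ort

options = ort.SessionOptions()
options.enable_mem_pattern = False
gpu_provider = (
    "CUDAExecutionProvider",
    {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"},
)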
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='funnel'
lowerCamelCase__ ={
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self : Optional[int] , a : Optional[Any]=3_0522 , a : str=[4, 4, 4] , a : Optional[Any]=None , a : List[str]=2 , a : int=768 , a : Union[str, Any]=12 , a : List[Any]=64 , a : List[str]=3072 , a : Optional[Any]="gelu_new" , a : Optional[int]=0.1 , a : List[Any]=0.1 , a : str=0.0 , a : Tuple=0.1 , a : Tuple=None , a : Union[str, Any]=1e-9 , a : Optional[Any]="mean" , a : Dict="relative_shift" , a : Dict=True , a : str=True , a : Optional[int]=True , **a : Dict , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = block_sizes
SCREAMING_SNAKE_CASE : Optional[int] = [1] * len(a ) if block_repeats is None else block_repeats
assert len(a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE : int = num_decoder_layers
SCREAMING_SNAKE_CASE : Dict = d_model
SCREAMING_SNAKE_CASE : List[str] = n_head
SCREAMING_SNAKE_CASE : Dict = d_head
SCREAMING_SNAKE_CASE : int = d_inner
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout
SCREAMING_SNAKE_CASE : List[str] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = initializer_std
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
SCREAMING_SNAKE_CASE : Tuple = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
SCREAMING_SNAKE_CASE : List[str] = attention_type
SCREAMING_SNAKE_CASE : Tuple = separate_cls
SCREAMING_SNAKE_CASE : Union[str, Any] = truncate_seq
SCREAMING_SNAKE_CASE : Optional[Any] = pool_q_only
super().__init__(**a )
@property
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return sum(self.block_sizes )
@num_hidden_layers.setter
def __UpperCamelCase ( self : Union[str, Any] , a : List[Any] ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return len(self.block_sizes )
@num_blocks.setter
def __UpperCamelCase ( self : Optional[Any] , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." ) | 25 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase__ ( _a):
return getitem, k
def lowerCamelCase__ ( _a , _a):
return setitem, k, v
def lowerCamelCase__ ( _a):
return delitem, k
def lowerCamelCase__ ( _a , _a , *_a):
try:
return fun(_a , *_a), None
except Exception as e:
return None, e
a_ = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
a_ = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
a_ = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
a_ = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Dict = HashMap(initial_block_size=4)
SCREAMING_SNAKE_CASE : List[str] = {}
for _, (fun, *args) in enumerate(_a):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = _run_operation(_a , _a , *_a)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = _run_operation(_a , _a , *_a)
assert my_res == py_res
assert str(_a) == str(_a)
assert set(_a) == set(_a)
assert len(_a) == len(_a)
assert set(my.items()) == set(py.items())
def lowerCamelCase__ ( ):
def is_public(_a) -> bool:
return not name.startswith("_")
SCREAMING_SNAKE_CASE : List[str] = {name for name in dir({}) if is_public(_a)}
SCREAMING_SNAKE_CASE : Union[str, Any] = {name for name in dir(HashMap()) if is_public(_a)}
    assert dict_public_names > hash_public_names
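# The _set/_get/_del helpers above just package dict operations as
# (callable, *args) tuples; a minimal standalone illustration against a plain dict:
from operator import delitem, getitem, setitem

d = {}
for fun, *args in [(setitem, "k", 1), (getitem, "k"), (delitem, "k")]:
    fun(d, *args)
assert d == {}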
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'spiece.model'}
a_ = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
a_ = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
a_ = '▁'
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , a : List[str] , a : List[str]=True , a : str=True , a : int=False , a : List[Any]="[CLS]" , a : str="[SEP]" , a : Tuple="<unk>" , a : Any="[SEP]" , a : List[str]="<pad>" , a : str="[CLS]" , a : Optional[int]="[MASK]" , a : Optional[Dict[str, Any]] = None , **a : Optional[int] , ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = (
AddedToken(a , lstrip=a , rstrip=a , normalized=a )
if isinstance(a , a )
else mask_token
)
SCREAMING_SNAKE_CASE : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
SCREAMING_SNAKE_CASE : Any = do_lower_case
SCREAMING_SNAKE_CASE : Any = remove_space
SCREAMING_SNAKE_CASE : List[Any] = keep_accents
SCREAMING_SNAKE_CASE : Tuple = vocab_file
SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return len(self.sp_model )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : List[Any] = None
return state
def __setstate__( self : Any , a : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self : Dict , a : List[Any] ) -> Tuple:
"""simple docstring"""
if self.remove_space:
SCREAMING_SNAKE_CASE : Any = " ".join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE : List[str] = inputs
SCREAMING_SNAKE_CASE : Tuple = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
SCREAMING_SNAKE_CASE : Optional[int] = unicodedata.normalize("NFKD" , a )
SCREAMING_SNAKE_CASE : Tuple = "".join([c for c in outputs if not unicodedata.combining(a )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE : Optional[int] = outputs.lower()
return outputs
def __UpperCamelCase ( self : str , a : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.preprocess_text(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.encode(a , out_type=a )
SCREAMING_SNAKE_CASE : List[str] = []
for piece in pieces:
if len(a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE : Tuple = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a )
else:
new_pieces.append(a )
return new_pieces
def __UpperCamelCase ( self : str , a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.PieceToId(a )
def __UpperCamelCase ( self : Union[str, Any] , a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(a )
def __UpperCamelCase ( self : List[str] , a : Union[str, Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Any = ""
SCREAMING_SNAKE_CASE : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a ) + token
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Tuple = []
else:
current_sub_tokens.append(a )
SCREAMING_SNAKE_CASE : Dict = False
out_string += self.sp_model.decode(a )
return out_string.strip()
def __UpperCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1]
def __UpperCamelCase ( self : Any , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : Optional[int] , a : str , a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , "wb" ) as fi:
SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto()
fi.write(a )
        return (out_vocab_file,)
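# Layout produced by build_inputs_with_special_tokens / create_token_type_ids_from_sequences
# above, sketched with dummy ids ([CLS]=2 and [SEP]=3 are placeholder ids I chose):
cls, sep = [2], [3]
ids_a, ids_b = [10, 11], [20]
pair = cls + ids_a + sep + ids_b + sep                         # [CLS] A [SEP] B [SEP]
token_types = len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]
assert len(pair) == len(token_types)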
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
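# Both stubs above follow the transformers lazy-import pattern: attribute access
# on the module triggers the real import, resolved from _import_structure. In the
# actual source files the assignment targets sys.modules rather than a local name:
#
#     sys.modules[__name__] = _LazyModule(
#         __name__, globals()["__file__"], _import_structure, module_spec=__spec__
#     )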
from __future__ import annotations
def lowerCamelCase__ ( _a):
if len(_a) == 0:
return []
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = min(_a), max(_a)
SCREAMING_SNAKE_CASE : Dict = int(max_value - min_value) + 1
SCREAMING_SNAKE_CASE : list[list] = [[] for _ in range(_a)]
for i in my_list:
buckets[int(i - min_value)].append(_a)
return [v for bucket in buckets for v in sorted(_a)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
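# The bucket index int(i - min_value) floors values into unit-width buckets,
# so the same routine handles floats as well (an extra check, not in the original):
assert bucket_sort([0.4, 1.7, 0.3]) == [0.3, 0.4, 1.7]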
a_ = range(2, 20 + 1)
a_ = [10**k for k in range(ks[-1] + 1)]
a_ = {}
def lowerCamelCase__ ( _a , _a , _a , _a):
SCREAMING_SNAKE_CASE : Dict = sum(a_i[j] for j in range(_a , len(_a)))
SCREAMING_SNAKE_CASE : str = sum(a_i[j] * base[j] for j in range(min(len(_a) , _a)))
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = 0, 0
SCREAMING_SNAKE_CASE : List[Any] = n - i
SCREAMING_SNAKE_CASE : List[Any] = memo.get(_a)
if sub_memo is not None:
SCREAMING_SNAKE_CASE : Dict = sub_memo.get(_a)
if jumps is not None and len(_a) > 0:
# find and make the largest jump without going over
SCREAMING_SNAKE_CASE : Dict = -1
for _k in range(len(_a) - 1 , -1 , -1):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
SCREAMING_SNAKE_CASE : str = _k
break
if max_jump >= 0:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = jumps[max_jump]
# since the difference between jumps is cached, add c
SCREAMING_SNAKE_CASE : List[str] = diff + c
for j in range(min(_a , len(_a))):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = divmod(_a , 10)
if new_c > 0:
add(_a , _a , _a)
else:
SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
SCREAMING_SNAKE_CASE : str = {c: []}
SCREAMING_SNAKE_CASE : Optional[Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = next_term(_a , k - 1 , i + dn , _a)
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = compute(_a , _a , i + dn , _a)
diff += _diff
dn += terms_jumped
SCREAMING_SNAKE_CASE : Union[str, Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
SCREAMING_SNAKE_CASE : int = 0
while j < len(_a):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_a , (diff, dn, k))
return (diff, dn)
def lowerCamelCase__ ( _a , _a , _a , _a):
if i >= n:
return 0, i
if k > len(_a):
a_i.extend([0 for _ in range(k - len(_a))])
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
SCREAMING_SNAKE_CASE : Tuple = i
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = 0, 0, 0
for j in range(len(_a)):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
SCREAMING_SNAKE_CASE : str = ds_c + ds_b
diff += addend
SCREAMING_SNAKE_CASE : str = 0
for j in range(_a):
SCREAMING_SNAKE_CASE : str = a_i[j] + addend
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = divmod(_a , 10)
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_a , _a , _a)
return diff, i - start_i
def lowerCamelCase__ ( _a , _a , _a):
for j in range(_a , len(_a)):
SCREAMING_SNAKE_CASE : Tuple = digits[j] + addend
if s >= 10:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = divmod(_a , 10)
SCREAMING_SNAKE_CASE : int = addend // 10 + quotient
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = s
SCREAMING_SNAKE_CASE : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = divmod(_a , 10)
digits.append(_a)
def lowerCamelCase__ ( _a = 10**15):
SCREAMING_SNAKE_CASE : List[str] = [1]
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
while True:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = next_term(_a , 20 , i + dn , _a)
dn += terms_jumped
if dn == n - i:
break
SCREAMING_SNAKE_CASE : int = 0
for j in range(len(_a)):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
    print(F'''{solution() = }''')
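# The memoized solver above targets the recurrence a(1) = 1,
# a(n+1) = a(n) + digit_sum(a(n)) (Project Euler 551). A naive reference for
# small n, useful as a cross-check against solution(n):
def naive(n):
    x = 1
    for _ in range(n - 1):
        x += sum(int(d) for d in str(x))
    return x

assert naive(20) == 137  # solution(20) should agree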
a_ = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset([])
a_ = frozenset(['image'])
a_ = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image'])
a_ = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'negative_prompt'])
a_ = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
a_ = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image', 'mask_image'])
a_ = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['example_image', 'image', 'mask_image'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset(['input_tokens'])
a_ = frozenset(['input_tokens'])
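# Hedged sketch of how these parameter frozensets are typically consumed in the
# pipeline test mixins: validating a call's kwargs against the allowed set (the
# public names, e.g. TEXT_TO_IMAGE_PARAMS for the first set, are assumptions here).
def check_call_kwargs(kwargs, allowed):
    unexpected = set(kwargs) - set(allowed)
    if unexpected:
        raise TypeError(f"unexpected pipeline arguments: {sorted(unexpected)}")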
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , a : List[str] , a : Optional[int]=7 , a : Tuple=3 , a : Optional[Any]=30 , a : Optional[Any]=400 , a : Optional[Any]=True , a : List[str]=None , a : Optional[Any]=True , a : Union[str, Any]=[0.5, 0.5, 0.5] , a : Optional[int]=[0.5, 0.5, 0.5] , a : Optional[Any]=True , a : Dict=1 / 255 , a : Dict=True , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : Any = min_resolution
SCREAMING_SNAKE_CASE : List[Any] = max_resolution
SCREAMING_SNAKE_CASE : Optional[int] = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : int = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean
SCREAMING_SNAKE_CASE : Dict = image_std
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE : str = rescale_factor
SCREAMING_SNAKE_CASE : Any = do_pad
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCamelCase ( self : List[Any] , a : List[Any] , a : str=False ) -> Tuple:
"""simple docstring"""
if not batched:
SCREAMING_SNAKE_CASE : Any = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = image.size
else:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : List[Any] = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE : List[Any] = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE : Optional[int] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE : List[Any] = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE : Tuple = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE : Optional[int] = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE : Any = []
for image in image_inputs:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE : Optional[int] = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE : str = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =DeformableDetrImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DeformableDetrImageProcessingTester(self )
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "do_rescale" ) )
self.assertTrue(hasattr(a , "do_pad" ) )
self.assertTrue(hasattr(a , "size" ) )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , a )
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
SCREAMING_SNAKE_CASE : int = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : List[str] = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Optional[Any] = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE : Optional[int] = DeformableDetrImageProcessor()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(images=a , annotations=a , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a )
SCREAMING_SNAKE_CASE : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : str = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) )
# verify boxes
SCREAMING_SNAKE_CASE : int = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Dict = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) )
# verify class_labels
SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) )
# verify size
SCREAMING_SNAKE_CASE : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) )
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Tuple = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE : str = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE : Optional[Any] = DeformableDetrImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE : int = image_processing(images=a , annotations=a , masks_path=a , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a )
SCREAMING_SNAKE_CASE : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : str = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) )
# verify boxes
SCREAMING_SNAKE_CASE : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a )
SCREAMING_SNAKE_CASE : int = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[Any] = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , a )
# verify orig_size
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) )
# verify size
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) ) | 25 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
    def __init__( self : Optional[Any] , features : Any=None , device : Any=None , **jnp_array_kwargs : Any ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                F"Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` "
                "is not serializable with either `pickle` or `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # use a global variable because `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                F"Device with string identifier {self.device} not listed among the available "
                F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
                F"device: {str(jax.devices()[0] )}." )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
def __UpperCamelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
        return {str(device ): device for device in jax.devices()}
def __UpperCamelCase ( self : Dict , a : Tuple ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
        column = a
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
def __UpperCamelCase ( self : Dict , a : str ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
        value = a
        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # use a global variable because `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
def __UpperCamelCase ( self : Any , a : List[str] ) -> Dict:
"""simple docstring"""
        data_struct = a
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
def __UpperCamelCase ( self : Optional[Any] , a : dict ) -> Dict:
"""simple docstring"""
        return map_nested(self._recursive_tensorize , a , map_list=False )
def __UpperCamelCase ( self : Dict , a : pa.Table ) -> Mapping:
"""simple docstring"""
        row = self.numpy_arrow_extractor().extract_row(a )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
def __UpperCamelCase ( self : Optional[int] , a : pa.Table ) -> "jax.Array":
"""simple docstring"""
        column = self.numpy_arrow_extractor().extract_column(a )
        column = self.python_features_decoder.decode_column(column , a.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
def __UpperCamelCase ( self : List[Any] , a : pa.Table ) -> Mapping:
"""simple docstring"""
        batch = self.numpy_arrow_extractor().extract_batch(a )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
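# Usage sketch (added for illustration, not part of the original module): this
# formatter is what backs `Dataset.with_format("jax")`; `device` and any
# `jnp_array_kwargs` are forwarded from `with_format` as format kwargs.
#
#   import datasets, jax
#   ds = datasets.Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))
#   ds[0]["x"]  # -> jax.Array placed on the requested device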
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
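# How the lazy pattern works, in miniature (an illustrative sketch, not the
# actual `_LazyModule` implementation): attribute access on the package
# triggers the real submodule import on first use via PEP 562's module-level
# __getattr__, so importing the package stays cheap.
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               module = importlib.import_module(f".{submodule}", __name__)
#               return getattr(module, name)
#       raise AttributeError(name)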
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *a : str , **a : int ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] , a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
import datasets
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : Dict = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE : Tuple = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@require_torch
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE : Dict = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Tuple = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 0.9985
SCREAMING_SNAKE_CASE : int = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : List[str] = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE : Dict = 0.9993
SCREAMING_SNAKE_CASE : str = pipeline("object-detection" , model=a , threshold=a )
SCREAMING_SNAKE_CASE : List[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ] , )
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(subreddit, limit=1, age="new", wanted_data=None):
    """Fetch ``limit`` posts from r/``subreddit``, keeping only the ``wanted_data`` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
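# A minimal retry sketch (my addition, not part of the original script): back
# off exponentially instead of failing outright when Reddit answers 429.
import time
def get_subreddit_data_with_backoff(subreddit, retries=3, **kwargs):
    for attempt in range(retries):
        try:
            return get_subreddit_data(subreddit, **kwargs)
        except requests.HTTPError:
            time.sleep(2 ** attempt)  # wait 1s, 2s, 4s, ...
    raise requests.HTTPError("still rate limited after retries")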
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext'])) | 25 |
def is_automorphic_number(number):
    """Return True when ``number``'s square ends in ``number`` itself (e.g. 5, 6, 25, 76)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
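# Quick sanity check (illustrative, not in the original file): the automorphic
# numbers below 100 are exactly 0, 1, 5, 6, 25 and 76.
assert [n for n in range(100) if is_automorphic_number(n)] == [0, 1, 5, 6, 25, 76]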
if __name__ == "__main__":
import doctest
    doctest.testmod()
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset")}),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42)}),
SplitDict({"train": SplitInfo()}),
] , )
def test_split_dict_to_yaml_list(split_dict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
        'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PLBartForCausalLM',
        'PLBartForConditionalGeneration',
        'PLBartForSequenceClassification',
        'PLBartModel',
        'PLBartPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
def solution(n=4000000):
    """Sum the even Fibonacci numbers that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
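# Alternative sketch (my addition): every third Fibonacci number is even, and
# the even ones obey E(k) = 4 * E(k - 1) + E(k - 2), so the odd terms can be
# skipped entirely.
def solution_even_recurrence(n=4000000):
    total, prev, curr = 0, 0, 2  # E(0) = 0, E(1) = 2
    while curr <= n:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total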
if __name__ == "__main__":
    print(F'''{solution() = }''')
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : str = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Any = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" , return_dict=a )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "google/ncsnpp-celebahq-256"
        SCREAMING_SNAKE_CASE : List[Any] = UNet2DModel.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Optional[Any] = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe(num_inference_steps=20 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
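# Minimal standalone sketch (illustrative, mirrors the slow test above):
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]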
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def binary_multiply(a, b):
    """Multiply two non-negative integers with the double-and-add (Russian peasant) scheme."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply(a, b, c):
    """Compute (a * b) % c with the same double-and-add scheme."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
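# Illustrative usage (added; not in the original file): the modular variant is
# the classic building block inside square-and-multiply modular exponentiation.
if __name__ == "__main__":
    assert binary_multiply(13, 17) == 221
    assert binary_mod_multiply(13, 17, 7) == (13 * 17) % 7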
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='roformer'
def __init__( self : Dict , a : Any=5_0000 , a : List[Any]=None , a : str=768 , a : str=12 , a : Tuple=12 , a : Optional[Any]=3072 , a : List[str]="gelu" , a : List[Any]=0.1 , a : Union[str, Any]=0.1 , a : Tuple=1536 , a : List[str]=2 , a : Tuple=0.02 , a : Any=1e-12 , a : Optional[int]=0 , a : Union[str, Any]=False , a : int=True , **a : str , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = rotary_value
SCREAMING_SNAKE_CASE : int = use_cache
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "sequence"}
SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
            ] )
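# Background sketch (my addition): RoFormer's defining idea is the rotary
# position embedding, which rotates each (even, odd) feature pair of the query
# and key vectors by a position-dependent angle instead of adding a position
# vector. A minimal numpy rendering of that rotation:
#
#   import numpy as np
#
#   def rotary_embed(x, position, base=10000):
#       dim = x.shape[-1]  # must be even; pairs (x[0::2], x[1::2]) rotate together
#       theta = position / base ** (np.arange(0, dim, 2) / dim)
#       cos, sin = np.cos(theta), np.sin(theta)
#       out = np.empty_like(x)
#       out[0::2] = x[0::2] * cos - x[1::2] * sin
#       out[1::2] = x[0::2] * sin + x[1::2] * cos
#       return out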
import logging
import os
from .state import PartialState
class _UpperCamelCase ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( a : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __UpperCamelCase ( self : Optional[int] , a : Optional[Any] , a : Optional[int] , *a : Dict , **a : Any ) -> Union[str, Any]:
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("main_process_only" , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("in_order" , a )
if self.isEnabledFor(a ):
if self._should_log(a ):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = self.process(a , a )
self.logger.log(a , a , *a , **a )
elif in_order:
SCREAMING_SNAKE_CASE : Any = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.process(a , a )
self.logger.log(a , a , *a , **a )
state.wait_for_everyone()
def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
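# Typical usage (illustrative): create the logger only after `Accelerator()` or
# `PartialState()` has been initialized, then opt in to per-process output with
# `main_process_only=False`.
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process")
#   logger.info("printed on every process", main_process_only=False)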
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a_ = logging.getLogger(__name__)
a_ = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path , pytorch_dump_folder_path):
    # flag values below mirror the published bertabs-finetuned-cnndm setup (assumed)
    config = BertAbsConfig(
        temp_dir="." , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    checkpoints = torch.load(path , lambda storage , loc: storage)
    original = AbsSummarizer(config , torch.device("cpu") , checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("cpu"))
    new_model.eval()
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = None
    clss = None
    mask_src = None
    mask_tgt = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , mask_src , mask_tgt , mask_cls)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1E-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
a_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
from __future__ import annotations
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_a)
if n > 1:
factors.append(_a)
return factors
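# Worked example: 360 = 2**3 * 3**2 * 5, so the trial division above returns
# [2, 2, 2, 3, 3, 5]; factors are always emitted in ascending order.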
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
a_ = parser.parse_args()
txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
feature_extractor = CLIPImageProcessor()
image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
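# Note: the image-variation pipeline reuses every trained component of the text-to-image
# checkpoint; only the CLIP image encoder and its feature extractor are added on top.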
imgaimg.save_pretrained(args.dump_path)
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : List[Any] = args.log_outputs
SCREAMING_SNAKE_CASE : str = "_".join(args.dataset.split("/") + [args.config, args.split])
# load metric
SCREAMING_SNAKE_CASE : List[Any] = load_metric("wer")
SCREAMING_SNAKE_CASE : Union[str, Any] = load_metric("cer")
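# Note: WER and CER are edit distances normalized by reference length, computed at word
# and character granularity respectively; lower is better.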
# compute metrics
SCREAMING_SNAKE_CASE : Dict = wer.compute(references=result["target"] , predictions=result["prediction"])
SCREAMING_SNAKE_CASE : Optional[Any] = cer.compute(references=result["target"] , predictions=result["prediction"])
# print & log results
SCREAMING_SNAKE_CASE : Optional[Any] = f"WER: {wer_result}\nCER: {cer_result}"
print(_a)
with open(f"{dataset_id}_eval_results.txt" , "w") as f:
f.write(_a)
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
SCREAMING_SNAKE_CASE : str = f"log_{dataset_id}_predictions.txt"
SCREAMING_SNAKE_CASE : str = f"log_{dataset_id}_targets.txt"
with open(_a , "w") as p, open(_a , "w") as t:
# mapping function to write output
def write_to_file(_a , _a):
p.write(f"{i}" + "\n")
p.write(batch["prediction"] + "\n")
t.write(f"{i}" + "\n")
t.write(batch["target"] + "\n")
result.map(_a , with_indices=_a)
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Union[str, Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
SCREAMING_SNAKE_CASE : Tuple = re.sub(_a , "" , text.lower())
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
SCREAMING_SNAKE_CASE : Tuple = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
SCREAMING_SNAKE_CASE : List[Any] = " ".join(text.split(_a))
return text
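# Illustrative example: normalize_text("Hello, World!") -> "hello world"
# (punctuation from the ignore list is stripped and the text is lower-cased).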
def lowerCamelCase__ ( _a):
# load dataset
SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_a)
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained(args.model_id)
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor.sampling_rate
# resample audio
SCREAMING_SNAKE_CASE : List[str] = dataset.cast_column("audio" , Audio(sampling_rate=_a))
# load eval pipeline
if args.device is None:
SCREAMING_SNAKE_CASE : Tuple = 0 if torch.cuda.is_available() else -1
SCREAMING_SNAKE_CASE : int = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device)
# map function to decode audio
def map_to_pred(_a):
SCREAMING_SNAKE_CASE : Optional[int] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s)
SCREAMING_SNAKE_CASE : Optional[Any] = prediction["text"]
SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_text(batch["sentence"])
return batch
# run inference on all examples
SCREAMING_SNAKE_CASE : Optional[Any] = dataset.map(_a , remove_columns=dataset.column_names)
# compute and log_results
# do not change function below
log_results(_a , _a)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to None (no chunking).'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks in seconds. Defaults to None.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. Defaults to GPU 0 if available, else CPU; pass -1 to force CPU.',
)
a_ = parser.parse_args()
main(args)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
Swinv2ForImageClassification,
Swinv2ForMaskedImageModeling,
Swinv2Model,
Swinv2PreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def lowerCamelCase__ ( numa , numb):
return numa ^ numb < 0
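# Sanity check: two integers have opposite signs exactly when their sign bits differ,
# which XOR exposes, e.g. (5 ^ -3) < 0 is True while (5 ^ 3) < 0 is False.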
if __name__ == "__main__":
import doctest
doctest.testmod()
from math import pi, sqrt, tan
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values")
return 6 * side_length**2
def lowerCamelCase__ ( _a , _a , _a):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values")
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values")
return 4 * pi * radius**2
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values")
return 3 * pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values")
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase__ ( radius_a , radius_b , height):
if radius_a < 0 or radius_b < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values")
SCREAMING_SNAKE_CASE : Any = (height**2 + (radius_a - radius_b) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_b)) + radius_a**2 + radius_b**2)
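# Formula used above: A = pi * (l * (r1 + r2) + r1**2 + r2**2), where the slant height
# l = sqrt(h**2 + (r1 - r2)**2) gives the lateral surface and the squared terms are the two caps.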
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values")
return 2 * pi * radius * (height + radius)
def lowerCamelCase__ ( _a , _a):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values")
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori")
return 4 * pow(_a , 2) * torus_radius * tube_radius
def lowerCamelCase__ ( _a , _a):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values")
return length * width
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values")
return side_length**2
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values")
return (base * height) / 2
def lowerCamelCase__ ( sidea , sideb , sidec):
if sidea < 0 or sideb < 0 or sidec < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values")
elif sidea + sideb < sidec or sidea + sidec < sideb or sideb + sidec < sidea:
raise ValueError("Given three sides do not form a triangle")
SCREAMING_SNAKE_CASE : List[str] = (sidea + sideb + sidec) / 2
SCREAMING_SNAKE_CASE : Optional[int] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sideb)
* (semi_perimeter - sidec))
return area
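# Worked example (Heron's formula): sides 5, 12, 13 give s = 15 and
# area = sqrt(15 * 10 * 3 * 2) = 30.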
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values")
return base * height
def lowerCamelCase__ ( basea , baseb , height):
if basea < 0 or baseb < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values")
return 1 / 2 * (basea + baseb) * height
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values")
return pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values")
return pi * radius_x * radius_y
def lowerCamelCase__ ( diagonal_a , diagonal_b):
if diagonal_a < 0 or diagonal_b < 0:
raise ValueError("area_rhombus() only accepts non-negative values")
return 1 / 2 * diagonal_a * diagonal_b
def lowerCamelCase__ ( sides , length):
if not isinstance(sides , int) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side")
return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
from collections.abc import Generator
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = 0, 1
while True:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = b, a + b
yield b
def lowerCamelCase__ ( _a = 1000):
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : int = fibonacci_generator()
while len(str(next(_a))) < n:
answer += 1
return answer + 1
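# Illustrative check: the first Fibonacci term with 3 digits is F(12) = 144,
# so solution(3) == 12.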
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
a_ = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
a_ = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Union[str, Any] = (
list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
)
SCREAMING_SNAKE_CASE : Optional[Any] = bs[:]
SCREAMING_SNAKE_CASE : List[Any] = 0
for b in range(2**8):
if b not in bs:
bs.append(_a)
cs.append(2**8 + n)
n += 1
SCREAMING_SNAKE_CASE : Tuple = [chr(_a) for n in cs]
return dict(zip(_a , _a))
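# The resulting map sends every byte to a printable unicode character: printable ASCII
# maps to itself (e.g. ord("A") -> "A"), while whitespace and control bytes map to code
# points above 255 so no BPE token ever contains them.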
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Tuple = set()
SCREAMING_SNAKE_CASE : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
SCREAMING_SNAKE_CASE : Union[str, Any] = char
return pairs
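# Example: get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}, the set of
# adjacent symbol bigrams that BPE considers for merging.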
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =['input_ids', 'attention_mask']
def __init__( self : int , a : int , a : Any , a : List[Any]="replace" , a : Optional[Any]="<s>" , a : str="</s>" , a : Optional[Any]="</s>" , a : Optional[int]="<s>" , a : Optional[int]="<unk>" , a : Union[str, Any]="<pad>" , a : Tuple="<mask>" , a : Any=False , **a : Optional[int] , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
SCREAMING_SNAKE_CASE : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
SCREAMING_SNAKE_CASE : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
SCREAMING_SNAKE_CASE : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
SCREAMING_SNAKE_CASE : Tuple = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
SCREAMING_SNAKE_CASE : int = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
with open(a , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE : Dict = json.load(a )
SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE : List[str] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE : Dict = bytes_to_unicode()
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.byte_encoder.items()}
with open(a , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE : str = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE : Dict = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE : List[str] = {}
SCREAMING_SNAKE_CASE : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE : Dict = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self : Tuple , a : Tuple ) -> List[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE : Any = tuple(a )
SCREAMING_SNAKE_CASE : str = get_pairs(a )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE : int = min(a , key=lambda a : self.bpe_ranks.get(a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = bigram
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while i < len(a ):
try:
SCREAMING_SNAKE_CASE : Optional[Any] = word.index(a , a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE : int = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(a )
SCREAMING_SNAKE_CASE : int = new_word
if len(a ) == 1:
break
else:
SCREAMING_SNAKE_CASE : Any = get_pairs(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = " ".join(a )
SCREAMING_SNAKE_CASE : List[Any] = word
return word
def __UpperCamelCase ( self : Tuple , a : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = []
for token in re.findall(self.pat , a ):
SCREAMING_SNAKE_CASE : Optional[Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : List[Any] , a : str ) -> str:
"""simple docstring"""
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Optional[int] , a : Any ) -> Any:
"""simple docstring"""
return self.decoder.get(a )
def __UpperCamelCase ( self : List[str] , a : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = "".join(a )
SCREAMING_SNAKE_CASE : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def __UpperCamelCase ( self : str , a : str , a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE : str = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE : int = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + "\n" )
SCREAMING_SNAKE_CASE : List[Any] = 0
with open(a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE : int = token_index
writer.write(" ".join(a ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id]
SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : Tuple , a : List[int] , a : Optional[List[int]] = None , a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def __UpperCamelCase ( self : List[str] , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : List[Any] , a : Union[str, Any] , a : Any=False , **a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE : Union[str, Any] = " " + text
return (text, kwargs)
def __UpperCamelCase ( self : Union[str, Any] , a : Union[Dict[str, EncodedInput], BatchEncoding] , a : Optional[int] = None , a : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , a : Optional[int] = None , a : Optional[bool] = None , ) -> dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = super()._pad(
encoded_inputs=a , max_length=a , padding_strategy=a , pad_to_multiple_of=a , return_attention_mask=a , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : str = len(encoded_inputs["global_attention_mask"] ) != len(a )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : Tuple = len(a ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : Dict = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , a : List[Any] , a : int=7 , a : List[Any]=3 , a : str=18 , a : Dict=30 , a : List[Any]=400 , a : str=True , a : Any=None , a : str=True , a : List[Any]=None , a : List[str]=True , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {"shortest_edge": 20}
SCREAMING_SNAKE_CASE : List[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : str = min_resolution
SCREAMING_SNAKE_CASE : List[Any] = max_resolution
SCREAMING_SNAKE_CASE : int = do_resize
SCREAMING_SNAKE_CASE : Optional[int] = size
SCREAMING_SNAKE_CASE : Any = do_center_crop
SCREAMING_SNAKE_CASE : int = crop_size
SCREAMING_SNAKE_CASE : int = do_flip_channel_order
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTImageProcessingTester(self )
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "do_center_crop" ) )
self.assertTrue(hasattr(a , "center_crop" ) )
self.assertTrue(hasattr(a , "do_flip_channel_order" ) )
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Any = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : List[Any] = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
from math import factorial, pi
def lowerCamelCase__ ( _a , _a = 30):
if not isinstance(_a , (int, float)):
raise ValueError("maclaurin_sin() requires either an int or float for theta")
if not isinstance(_a , _a) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy")
SCREAMING_SNAKE_CASE : int = float(_a)
SCREAMING_SNAKE_CASE : Dict = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(_a))
def lowerCamelCase__ ( _a , _a = 30):
if not isinstance(_a , (int, float)):
raise ValueError("maclaurin_cos() requires either an int or float for theta")
if not isinstance(_a , _a) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy")
SCREAMING_SNAKE_CASE : str = float(_a)
SCREAMING_SNAKE_CASE : Any = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(_a))
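# Series used above: sin(x) = sum((-1)**r * x**(2r+1) / (2r+1)!) and
# cos(x) = sum((-1)**r * x**(2r) / (2r)!); theta is first reduced modulo 2*pi
# so the truncated series stays accurate.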
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : str = []
for rt in rc.restypes:
SCREAMING_SNAKE_CASE : Tuple = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
SCREAMING_SNAKE_CASE : Dict = {name: i for i, name in enumerate(_a)}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14)
restype_atomaa_to_atomaa_list.append([0] * 37)
restype_atomaa_mask_list.append([0.0] * 14)
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
_a , dtype=torch.intaa , device=protein["aatype"].device , )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
_a , dtype=torch.intaa , device=protein["aatype"].device , )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
_a , dtype=torch.floataa , device=protein["aatype"].device , )
SCREAMING_SNAKE_CASE : Optional[Any] = protein["aatype"].to(torch.long)
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE : Any = restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE : int = restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE : List[str] = residx_atomaa_mask
SCREAMING_SNAKE_CASE : Tuple = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE : str = restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE : str = residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device)
for restype, restype_letter in enumerate(rc.restypes):
SCREAMING_SNAKE_CASE : Union[str, Any] = rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE : Optional[int] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE : Union[str, Any] = rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE : int = residx_atomaa_mask
return protein
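# Background: atom37 is a fixed 37-slot atom layout shared by all residue types, while
# atom14 packs at most 14 atoms per residue; the index and mask tensors built above
# gather coordinates between the two layouts per residue type.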
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : List[Any] = tree_map(lambda _a: torch.tensor(_a , device=batch["aatype"].device) , _a , np.ndarray)
SCREAMING_SNAKE_CASE : int = tensor_tree_map(lambda _a: np.array(_a) , make_atomaa_masks(_a))
return out
from __future__ import annotations
import math
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = size
# approximate the overall size of segment tree with given value
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
SCREAMING_SNAKE_CASE : Union[str, Any] = [0 for i in range(0 , 4 * size )]
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update
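# Lazy propagation: range assignments are recorded in `lazy` and marked in `flag`,
# then pushed down to children only when a later update/query visits that node,
# keeping both operations O(log n).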
def __UpperCamelCase ( self : Tuple , a : int ) -> int:
"""simple docstring"""
return idx * 2
def __UpperCamelCase ( self : str , a : int ) -> int:
"""simple docstring"""
return idx * 2 + 1
def __UpperCamelCase ( self : int , a : int , a : int , a : int , a : list[int] ) -> None:
"""simple docstring"""
if left_element == right_element:
SCREAMING_SNAKE_CASE : int = a[left_element - 1]
else:
SCREAMING_SNAKE_CASE : Optional[int] = (left_element + right_element) // 2
self.build(self.left(a ) , a , a , a )
self.build(self.right(a ) , mid + 1 , a , a )
SCREAMING_SNAKE_CASE : List[Any] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : int , a : int , a : int , a : int , a : int ) -> bool:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : Any = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[str] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : List[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
SCREAMING_SNAKE_CASE : Optional[Any] = val
if left_element != right_element:
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
return True
SCREAMING_SNAKE_CASE : int = (left_element + right_element) // 2
self.update(self.left(a ) , a , a , a , a , a )
self.update(self.right(a ) , mid + 1 , a , a , a , a )
SCREAMING_SNAKE_CASE : Optional[int] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
return True
def __UpperCamelCase ( self : Dict , a : int , a : int , a : int , a : int , a : int ) -> int | float:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[Any] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
SCREAMING_SNAKE_CASE : Dict = (left_element + right_element) // 2
SCREAMING_SNAKE_CASE : Tuple = self.query(self.left(a ) , a , a , a , a )
SCREAMING_SNAKE_CASE : Tuple = self.query(self.right(a ) , mid + 1 , a , a , a )
return max(a , a )
def __str__( self : str ) -> str:
"""simple docstring"""
return str([self.query(1 , 1 , self.size , a , a ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
a_ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
a_ = 15
a_ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
a_ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ = 'allenai'
def lowerCamelCase__ ( _a):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
SCREAMING_SNAKE_CASE : Union[str, Any] = dict((re.sub(r"@@$" , "" , _a), v) if k.endswith("@@") else (re.sub(r"$" , "</w>" , _a), v) for k, v in d.items())
SCREAMING_SNAKE_CASE : Optional[int] = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[f"{k}</w>"]
SCREAMING_SNAKE_CASE : Optional[int] = d[k] # restore
return da
def lowerCamelCase__ ( _a , _a):
# prep
assert os.path.exists(_a)
os.makedirs(_a , exist_ok=_a)
print(f"Writing results to {pytorch_dump_folder_path}")
# handle various types of models
SCREAMING_SNAKE_CASE : Dict = basename(_a)
SCREAMING_SNAKE_CASE : Union[str, Any] = dirname(_a)
SCREAMING_SNAKE_CASE : Optional[int] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE : int = cls.hub_models()
SCREAMING_SNAKE_CASE : List[Any] = {"bpe": "fastbpe", "tokenizer": "moses"}
SCREAMING_SNAKE_CASE : List[Any] = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"using checkpoint {checkpoint_file}")
SCREAMING_SNAKE_CASE : Dict = hub_utils.from_pretrained(
_a , _a , _a , archive_map=_a , **_a)
SCREAMING_SNAKE_CASE : int = vars(chkpt["args"]["model"])
SCREAMING_SNAKE_CASE : Union[str, Any] = args["source_lang"]
SCREAMING_SNAKE_CASE : List[Any] = args["target_lang"]
SCREAMING_SNAKE_CASE : Any = dirname(_a)
SCREAMING_SNAKE_CASE : Optional[Any] = basename(_a)
# dicts
SCREAMING_SNAKE_CASE : Tuple = os.path.join(_a , f"dict.{src_lang}.txt")
SCREAMING_SNAKE_CASE : Dict = os.path.join(_a , f"dict.{tgt_lang}.txt")
SCREAMING_SNAKE_CASE : Dict = Dictionary.load(_a)
SCREAMING_SNAKE_CASE : str = rewrite_dict_keys(src_dict.indices)
SCREAMING_SNAKE_CASE : int = len(_a)
SCREAMING_SNAKE_CASE : Any = os.path.join(_a , "vocab-src.json")
print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
with open(_a , "w" , encoding="utf-8") as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a))
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE : Tuple = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE : Dict = False
break
SCREAMING_SNAKE_CASE : List[Any] = Dictionary.load(_a)
SCREAMING_SNAKE_CASE : Optional[int] = rewrite_dict_keys(tgt_dict.indices)
SCREAMING_SNAKE_CASE : Union[str, Any] = len(_a)
SCREAMING_SNAKE_CASE : int = os.path.join(_a , "vocab-tgt.json")
print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
with open(_a , "w" , encoding="utf-8") as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a))
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_a , VOCAB_FILES_NAMES["merges_file"])
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(_a , _a)
if os.path.exists(_a):
break
with open(_a , encoding="utf-8") as fin:
SCREAMING_SNAKE_CASE : Optional[Any] = fin.read()
SCREAMING_SNAKE_CASE : Dict = re.sub(r" \d+$" , "" , _a , 0 , re.M) # remove frequency number
print(f"Generating {merges_file}")
with open(_a , "w" , encoding="utf-8") as fout:
fout.write(_a)
# model config
SCREAMING_SNAKE_CASE : Dict = os.path.join(_a , "config.json")
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
SCREAMING_SNAKE_CASE : int = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE : List[Any] = 5
SCREAMING_SNAKE_CASE : List[str] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE : Tuple = best_score_hparams[model_dir]["length_penalty"]
else:
SCREAMING_SNAKE_CASE : int = 1.0
print(f"Generating {fsmt_model_config_file}")
with open(_a , "w" , encoding="utf-8") as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a))
# tokenizer config
SCREAMING_SNAKE_CASE : int = os.path.join(_a , _a)
SCREAMING_SNAKE_CASE : Dict = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1024,
"do_lower_case": do_lower_case,
}
print(f"Generating {fsmt_tokenizer_config_file}")
with open(_a , "w" , encoding="utf-8") as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a))
# model
SCREAMING_SNAKE_CASE : Tuple = chkpt["models"][0]
SCREAMING_SNAKE_CASE : List[Any] = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE : Any = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
# remove unneeded keys
SCREAMING_SNAKE_CASE : Optional[int] = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(_a , _a)
SCREAMING_SNAKE_CASE : Any = FSMTConfig.from_pretrained(_a)
SCREAMING_SNAKE_CASE : int = FSMTForConditionalGeneration(_a)
# check that it loads ok
model_new.load_state_dict(_a , strict=_a)
# save
SCREAMING_SNAKE_CASE : Tuple = os.path.join(_a , _a)
print(f"Generating {pytorch_weights_dump_path}")
torch.save(_a , _a)
print("Conversion is done!")
print("\nLast step is to upload the files to s3")
print(f"cd {data_root}")
print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
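# Note: the tests below are slow, GPU-only integration tests; each runs a full Stable
# Diffusion pipeline with a k-diffusion sampler and compares a 3x3 corner slice of the
# output image against stored reference values.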
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : Optional[int] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = sd_pipe(
[prompt] , generator=a , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=a , )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( _a , _a = False):
if not isinstance(_a , _a):
SCREAMING_SNAKE_CASE : List[Any] = f"Expected string as input, found {type(_a)}"
raise ValueError(_a)
if not isinstance(_a , _a):
SCREAMING_SNAKE_CASE : List[Any] = f"Expected boolean as use_pascal parameter, found {type(_a)}"
raise ValueError(_a)
SCREAMING_SNAKE_CASE : Dict = input_str.split("_")
SCREAMING_SNAKE_CASE : List[str] = 0 if use_pascal else 1
SCREAMING_SNAKE_CASE : Dict = words[start_index:]
SCREAMING_SNAKE_CASE : str = [word[0].upper() + word[1:] for word in words_to_capitalize]
SCREAMING_SNAKE_CASE : Optional[Any] = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE : Union[str, Any] = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 | 25 | 1 |
def lowerCamelCase__ ( _a):
if not isinstance(_a , _a):
raise ValueError("multiplicative_persistence() only accepts integral values")
if num < 0:
raise ValueError("multiplicative_persistence() does not accept negative values")
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : List[Any] = str(_a)
while len(_a) != 1:
SCREAMING_SNAKE_CASE : Tuple = [int(_a) for i in num_string]
SCREAMING_SNAKE_CASE : Optional[int] = 1
for i in range(0 , len(_a)):
total *= numbers[i]
SCREAMING_SNAKE_CASE : Dict = str(_a)
steps += 1
return steps
def lowerCamelCase__ ( _a):
if not isinstance(_a , _a):
raise ValueError("additive_persistence() only accepts integral values")
if num < 0:
raise ValueError("additive_persistence() does not accept negative values")
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = str(_a)
while len(_a) != 1:
SCREAMING_SNAKE_CASE : str = [int(_a) for i in num_string]
SCREAMING_SNAKE_CASE : Tuple = 0
for i in range(0 , len(_a)):
total += numbers[i]
SCREAMING_SNAKE_CASE : Any = str(_a)
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase__ ( _a):
return getitem, k
def lowerCamelCase__ ( _a , _a):
return setitem, k, v
def lowerCamelCase__ ( _a):
return delitem, k
def lowerCamelCase__ ( _a , _a , *_a):
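    # Apply the operation, returning (result, None) on success or (None, exception) on failure.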
try:
return fun(_a , *_a), None
except Exception as e:
return None, e
a_ = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
a_ = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
a_ = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
a_ = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Dict = HashMap(initial_block_size=4)
SCREAMING_SNAKE_CASE : List[str] = {}
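    # Replay every operation against both our HashMap and a plain dict, then compare their observable state.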
for _, (fun, *args) in enumerate(_a):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = _run_operation(_a , _a , *_a)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = _run_operation(_a , _a , *_a)
assert my_res == py_res
assert str(_a) == str(_a)
assert set(_a) == set(_a)
assert len(_a) == len(_a)
assert set(my.items()) == set(py.items())
def lowerCamelCase__ ( ):
def is_public(_a) -> bool:
        return not _a.startswith("_")
SCREAMING_SNAKE_CASE : List[str] = {name for name in dir({}) if is_public(_a)}
SCREAMING_SNAKE_CASE : Union[str, Any] = {name for name in dir(HashMap()) if is_public(_a)}
assert dict_public_names > hash_public_names | 25 | 1 |
from collections import Counter
from timeit import timeit
def lowerCamelCase__ ( _a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "").lower()).values()) < 2
def lowerCamelCase__ ( _a = ""):
if len(_a) == 0:
return True
SCREAMING_SNAKE_CASE : List[Any] = input_str.replace(" " , "").lower()
# character_freq_dict: Stores the frequency of every character in the input string
SCREAMING_SNAKE_CASE : dict[str, int] = {}
for character in lower_case_input_str:
SCREAMING_SNAKE_CASE : Optional[int] = character_freq_dict.get(_a , 0) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowerCamelCase__ ( _a = ""):
print("\nFor string = " , _a , ":")
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(_a) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(_a) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
a_ = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
a_ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''') | 25 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure) | 25 | 1 |
import heapq
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : list[list] = []
    # for each node, push its rank and adjacency list onto the queue;
    # heapq implements a min-priority queue, so the rank is stored as
    # -1 * len(v) to pop the highest-rank vertex first
for key, value in graph.items():
# O(log(n))
heapq.heappush(_a , [-1 * len(_a), (key, value)])
# chosen_vertices = set of chosen vertices
SCREAMING_SNAKE_CASE : str = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the negated rank of the highest-rank node)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
SCREAMING_SNAKE_CASE : str = heapq.heappop(_a)[1][0]
chosen_vertices.add(_a)
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
SCREAMING_SNAKE_CASE : List[str] = elem[1][1].index(_a)
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_a)
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''') | 25 |
from __future__ import annotations
def lowerCamelCase__ ( _a):
if len(_a) == 0:
return []
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = min(_a), max(_a)
SCREAMING_SNAKE_CASE : Dict = int(max_value - min_value) + 1
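    # allocate one bucket per integer offset from the minimum value; each bucket is sorted and concatenated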
SCREAMING_SNAKE_CASE : list[list] = [[] for _ in range(_a)]
for i in my_list:
buckets[int(i - min_value)].append(_a)
return [v for bucket in buckets for v in sorted(_a)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15] | 25 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[int] , a : Any , a : str=13 , a : Optional[int]=30 , a : int=2 , a : str=3 , a : Optional[Any]=True , a : List[Any]=True , a : Any=32 , a : Any=2 , a : Optional[int]=4 , a : Dict=37 , a : List[Any]="gelu" , a : int=0.1 , a : List[str]=0.1 , a : Dict=10 , a : Optional[int]=0.02 , a : Tuple=3 , a : Union[str, Any]=None , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Tuple = batch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : List[str] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Optional[Any] = num_patches + 1
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : List[Any] , a : Any , a : Union[str, Any] , a : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = TFViTModel(config=a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , training=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
SCREAMING_SNAKE_CASE : Tuple = self.image_size // 2
SCREAMING_SNAKE_CASE : List[Any] = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE : int = model(a , interpolate_pos_encoding=a , training=a )
SCREAMING_SNAKE_CASE : List[Any] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __UpperCamelCase ( self : List[str] , a : List[str] , a : Tuple , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = TFViTForImageClassification(a )
SCREAMING_SNAKE_CASE : str = model(a , labels=a , training=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
SCREAMING_SNAKE_CASE : Tuple = self.image_size // 2
SCREAMING_SNAKE_CASE : int = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE : Tuple = model(a , interpolate_pos_encoding=a , training=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : List[str] = TFViTForImageClassification(a )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[str] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowerCamelCase__ =(
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = TFViTModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , tf.keras.layers.Layer ) )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(a )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(a )
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=a , return_tensors="tf" )
# forward pass
SCREAMING_SNAKE_CASE : Dict = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE : Dict = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , a , atol=1e-4 ) | 25 |
a_ = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset([])
a_ = frozenset(['image'])
a_ = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image'])
a_ = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'negative_prompt'])
a_ = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
a_ = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image', 'mask_image'])
a_ = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['example_image', 'image', 'mask_image'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset(['input_tokens'])
a_ = frozenset(['input_tokens']) | 25 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
a_ = get_tests_dir('fixtures/dummy-config.json')
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 0
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(a , a )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(a )
self.assertIsInstance(a , a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(a )
self.assertIsInstance(a , a )
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = AutoConfig.for_model("roberta" )
self.assertIsInstance(a , a )
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE : List[str] = os.path.join(a , "fake-roberta" )
os.makedirs(a , exist_ok=a )
with open(os.path.join(a , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(a )
self.assertEqual(type(a ) , a )
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register("custom" , a )
# Wrong model type will raise an error
with self.assertRaises(a ):
AutoConfig.register("model" , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoConfig.register("bert" , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
a , "bert-base is not a local folder and is not a valid model identifier" ):
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained("bert-base" )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
a , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(a , revision="aaaaaa" )
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
a , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=a )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=a )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(a , trust_remote_code=a )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='new-model'
try:
AutoConfig.register("new-model" , a )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=a )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=a )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"] | 25 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a_ = get_logger()
a_ = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
def __init__( self : Optional[Any] , a : str=None , a : List[Any]=None , **a : Any ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=a )
import jax
from jaxlib.xla_client import Device
if isinstance(a , a ):
raise ValueError(
F"Expected {device} to be a `str` not {type(a )}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
SCREAMING_SNAKE_CASE : List[str] = device if isinstance(a , a ) else str(jax.devices()[0] )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or `dill`,
        # so the device mapping is kept in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : str = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
SCREAMING_SNAKE_CASE : Any = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE : Any = jnp_array_kwargs
@staticmethod
def __UpperCamelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(a ): device for device in jax.devices()}
def __UpperCamelCase ( self : Dict , a : Tuple ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
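        # Only stack into one batched array when every element is a jax array with identical shape and dtype.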
if isinstance(a , a ) and column:
if all(
isinstance(a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a , axis=0 )
return column
def __UpperCamelCase ( self : Dict , a : str ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(a , (str, bytes, type(None)) ):
return value
elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                SCREAMING_SNAKE_CASE : Dict = {"dtype": jnp.int64}
            else:
                SCREAMING_SNAKE_CASE : str = {"dtype": jnp.int32}
        elif isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            SCREAMING_SNAKE_CASE : int = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Dict = np.asarray(a )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or `dill`,
        # so the device mapping is kept in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a , **{**default_dtype, **self.jnp_array_kwargs} )
def __UpperCamelCase ( self : Any , a : List[str] ) -> Dict:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a , "__array__" ) and not isinstance(a , jax.Array ):
SCREAMING_SNAKE_CASE : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
elif isinstance(a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
return self._tensorize(a )
def __UpperCamelCase ( self : Optional[Any] , a : dict ) -> Dict:
"""simple docstring"""
return map_nested(self._recursive_tensorize , a , map_list=a )
def __UpperCamelCase ( self : Dict , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_row(a )
SCREAMING_SNAKE_CASE : List[Any] = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def __UpperCamelCase ( self : Optional[int] , a : pa.Table ) -> "jax.Array":
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_arrow_extractor().extract_column(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE : Tuple = self.recursive_tensorize(a )
SCREAMING_SNAKE_CASE : Optional[int] = self._consolidate(a )
return column
def __UpperCamelCase ( self : List[Any] , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_batch(a )
SCREAMING_SNAKE_CASE : str = self.python_features_decoder.decode_batch(a )
SCREAMING_SNAKE_CASE : List[Any] = self.recursive_tensorize(a )
for column_name in batch:
SCREAMING_SNAKE_CASE : List[Any] = self._consolidate(batch[column_name] )
return batch | 25 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , a : Optional[int] , a : List[str]=13 , a : Optional[int]=10 , a : str=3 , a : int=2 , a : Optional[Any]=2 , a : Any=True , a : Optional[int]=True , a : List[Any]=32 , a : Optional[Any]=5 , a : str=4 , a : Optional[int]=37 , a : int="gelu" , a : Dict=0.1 , a : Optional[Any]=0.1 , a : Dict=10 , a : List[Any]=0.02 , a : Dict="divided_space_time" , a : Any=None , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : Dict = patch_size
SCREAMING_SNAKE_CASE : List[str] = num_frames
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_type
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = scope
SCREAMING_SNAKE_CASE : Dict = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE : List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Dict = (num_frames) * self.num_patches_per_frame + 1
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : List[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
return config
def __UpperCamelCase ( self : int , a : Tuple , a : List[Any] , a : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TimesformerModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Optional[Any] , a : Tuple , a : Dict , a : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = TimesformerForVideoClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a )
# verify the logits shape
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , a )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCamelCase__ =(
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = TimesformerModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(
self , config_class=a , has_text_modality=a , hidden_size=37 )
def __UpperCamelCase ( self : Union[str, Any] , a : Union[str, Any] , a : Dict , a : Optional[Any]=False ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = copy.deepcopy(a )
if return_labels:
if model_class in get_values(a ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(a )
SCREAMING_SNAKE_CASE : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*a )
@slow
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = TimesformerModel.from_pretrained(a )
self.assertIsNotNone(a )
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.seq_length
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.num_frames
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Any = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE : List[Any] = len(a )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(a : Tuple , a : Dict , a : Tuple ):
SCREAMING_SNAKE_CASE : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Dict = True
check_hidden_states_output(a , a , a )
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Optional[int] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset")
SCREAMING_SNAKE_CASE : Any = np.load(_a)
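    # the .npy fixture holds the video frames as a single array; return them as a list of per-frame arrays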
return list(_a)
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
a )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : str = prepare_video()
SCREAMING_SNAKE_CASE : Dict = image_processor(video[:8] , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE : str = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) | 25 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *a : str , **a : int ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] , a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
import datasets
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : Dict = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE : Tuple = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@require_torch
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE : Dict = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Tuple = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 0.9985
SCREAMING_SNAKE_CASE : int = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : List[str] = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE : Dict = 0.9993
SCREAMING_SNAKE_CASE : str = pipeline("object-detection" , model=a , threshold=a )
SCREAMING_SNAKE_CASE : List[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , ) | 25 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =['pixel_values']
def __init__( self : Tuple , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) -> None:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : Union[str, Any] = size if size is not None else {"shortest_edge": 256}
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(a )
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : List[str] = size
SCREAMING_SNAKE_CASE : List[str] = resample
SCREAMING_SNAKE_CASE : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE : int = crop_size
SCREAMING_SNAKE_CASE : Tuple = do_rescale
SCREAMING_SNAKE_CASE : str = rescale_factor
SCREAMING_SNAKE_CASE : List[str] = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self : int , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : str , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def __UpperCamelCase ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def __UpperCamelCase ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] ) -> np.ndarray:
"""simple docstring"""
return rescale(a , scale=a , data_format=a , **a )
def __UpperCamelCase ( self : Optional[int] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
    def preprocess( self : Any , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> Union[str, Any]:
        """simple docstring"""
        # Arguments left as None fall back to the defaults stored on the processor at __init__ time.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        # Convert every image to the requested channel layout (channels-first by default).
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
def lowerCamelCase__ ( number):
    """Return True if `number` is an automorphic number, i.e. its square ends in the number itself.

    >>> lowerCamelCase__(76)
    True
    >>> lowerCamelCase__(8)
    False
    """
    if not isinstance(number , int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}
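# The optional backends below (tokenizers, torch, TF) are probed at import time; their
# classes are only registered in the import structure when the backend is installed.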
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_funnel'] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_funnel'] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester :
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
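# The suite below exercises every DistilBERT task head with the shared tester inputs.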
@require_torch
class _UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
    def setUp( self : Optional[Any] ) -> None:
        """simple docstring"""
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
    def test_model_from_pretrained( self : int ) -> Any:
        """simple docstring"""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change( self : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                # Round-trip the traced module through disk to check it serializes and reloads cleanly.
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head_absolute_embedding( self : int ) -> Dict:
        """simple docstring"""
        model = DistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _UpperCamelCase ( TestCasePlus ):
'''simple docstring'''
@slow
@require_torch
    def test_finetune_bert2bert( self : Any ) -> Any:
        """simple docstring"""
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Pad-token positions in the labels are set to -100 so the cross-entropy loss ignores them.
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
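# At type-checking time the real symbols are imported below so static analyzers can
# resolve them; at runtime the lazy module defers the heavy imports instead.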
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
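# Keys are fairseq parameter-name fragments; values are the matching HF module paths,
# with "*" standing in for the encoder layer index.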
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
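# Parameters matching these keys live at the top level of the HF model rather than
# under the "unispeech_sat." prefix.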
def set_recursively(hf_pointer , key , value , full_name , weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model , hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*" , layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    # type_id 0 addresses the conv weight/bias tensors; type_id 2 the norm layers of the feature extractor.
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    recursively_load_weights(model , hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
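# Full determinism makes the pixel-level assertions in these tests reproducible across runs.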
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def test_inference( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # Re-seeding before each call keeps the two generations bit-identical for comparison.
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type="numpy" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type="numpy" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_inference( self : int ) -> Tuple:
        """simple docstring"""
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None , metadata=None):
    return field(default_factory=lambda: default , metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'} , )
    plot_along_batch: bool = field(
        default=False , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
    is_time: bool = field(
        default=False , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
    no_log_scale: bool = field(
        default=False , metadata={'help': 'Disable logarithmic scale when plotting'} , )
    is_train: bool = field(
        default=False , metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def can_convert_to_int(value):
    try:
        int(value)
        return True
    except ValueError:
        return False
def can_convert_to_float(value):
    try:
        float(value)
        return True
    except ValueError:
        return False
class Plot:
    '''simple docstring'''
    def __init__( self : Tuple , args ) -> None:
        """simple docstring"""
        self.args = args
        # result_dict maps model name -> {"bsz": [...], "seq_len": [...], "result": {(bsz, seq_len): value}}
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline="" ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) )
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) )
                if can_convert_to_int(row["result"] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"] ), int(row["sequence_length"] ))
                    ] = int(row["result"] )
                elif can_convert_to_float(row["result"] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"] ), int(row["sequence_length"] ))
                    ] = float(row["result"] )
    def plot( self : Tuple ) -> None:
        """simple docstring"""
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log" )
            ax.set_yscale("log" )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"] ) )
            results = self.result_dict[model_name]["result"]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
                plt.plot(x_axis_array , y_axis_array , "--" )
                title_str += F" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
    main()
def lowerCamelCase__ ( a , b):
    """Multiply two non-negative integers with the shift-and-add (binary) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def lowerCamelCase__ ( a , b , c):
    """Compute (a * b) % c with the same shift-and-add method, keeping the accumulator reduced mod c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
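# Minimal sanity check (illustrative values, not part of the original script; the call
# resolves to the modular variant, which shadows the plain one defined above):
if __name__ == "__main__":
    print(lowerCamelCase__(7, 13, 10))  # (7 * 13) % 10 == 1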
class Graph :
    '''simple docstring'''
    def __init__( self : List[Any] ) -> None:
        """simple docstring"""
        # Adjacency list mapping each vertex to the vertices it points to.
        self.vertex = {}
    def print_graph( self : Dict ) -> None:
        """simple docstring"""
        print(self.vertex )
        for i in self.vertex:
            print(i , " -> " , " -> ".join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self : List[str] , from_vertex : int , to_vertex : int ) -> None:
        """simple docstring"""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self : List[str] ) -> None:
        """simple docstring"""
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self : Optional[int] , start_vertex : int , visited : list ) -> None:
        """simple docstring"""
        visited[start_vertex] = True
        print(start_vertex , end=" " )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'roformer'
    def __init__( self : Dict , vocab_size=5_0000 , embedding_size=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1536 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class _UpperCamelCase ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            # The multiple-choice head adds a "choice" axis between batch and sequence.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester :
'''simple docstring'''
def __init__( self : Union[str, Any] , a : Tuple , a : List[str]=13 , a : List[str]=7 , a : Union[str, Any]=True , a : List[Any]=True , a : Any=False , a : List[str]=True , a : str=99 , a : Union[str, Any]=64 , a : Any=5 , a : Dict=4 , a : List[Any]=64 , a : Optional[Any]="gelu" , a : Tuple=0.1 , a : Union[str, Any]=0.1 , a : Dict=512 , a : List[str]=16 , a : Tuple=2 , a : Optional[int]=0.02 , a : Tuple=3 , a : List[Any]=4 , a : Any=None , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : str = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Optional[int] = num_choices
SCREAMING_SNAKE_CASE : Optional[int] = scope
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[int] , a : Union[str, Any] , a : str , a : int , a : List[str] , a : Union[str, Any] , a : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = MPNetModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(a , a )
SCREAMING_SNAKE_CASE : int = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self : List[str] , a : Any , a : Union[str, Any] , a : Dict , a : int , a : List[Any] , a : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = MPNetForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(
a , attention_mask=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple , a : Union[str, Any] , a : int , a : Optional[int] , a : Dict , a : Any , a : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = MPNetForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , a : Any , a : Optional[Any] , a : Dict , a : Optional[Any] , a : int , a : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE : Optional[int] = MPNetForMultipleChoice(config=a )
model.to(a )
model.eval()
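        # Tile each input along a new choice axis: (batch, seq) -> (batch, num_choices, seq).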
SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Tuple , a : Optional[int] , a : List[Any] , a : Tuple , a : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = MPNetForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =True
    def setUp( self : Tuple ) -> None:
        """simple docstring"""
        self.model_tester = MPNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MPNetConfig , hidden_size=37 )
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*a )
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*a )
def __UpperCamelCase ( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*a )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head( self : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        model = MPNetModel.from_pretrained("microsoft/mpnet-base" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a_ = logging.getLogger(__name__)
a_ = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
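# BertAbsConfig mirrors the authors' command-line hyperparameters as a flat namedtuple.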
def convert_bertabs_checkpoints(path , pytorch_dump_folder_path):
    # Instantiate the authors' model with the pre-trained weights (boolean hyperparameters
    # assumed from the published BertAbs configuration).
    config = BertAbsConfig(
        temp_dir="." , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    checkpoints = torch.load(path , lambda storage , loc: storage)
    original = AbsSummarizer(config , torch.device("cpu") , checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("cpu"))
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
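    # At this point both inputs are fixed-size (1, 512) tensors of token ids, right-padded with the pad token.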
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
    # Local names below follow the authors' AbsSummarizer signature (src, tgt, segs, clss, masks).
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = None
    clss = None
    mask_src = None
    mask_tgt = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        src , tgt , segs , mask_src , mask_tgt)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    max_absolute_diff = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(max_absolute_diff))
    max_absolute_diff = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(max_absolute_diff))
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1e-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
    )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='deberta-v2'
def __init__( self : List[str] , a : int=12_8100 , a : str=1536 , a : List[Any]=24 , a : Union[str, Any]=24 , a : Union[str, Any]=6144 , a : Dict="gelu" , a : Dict=0.1 , a : Dict=0.1 , a : Any=512 , a : Optional[int]=0 , a : Any=0.02 , a : int=1e-7 , a : Dict=False , a : Optional[int]=-1 , a : int=0 , a : Union[str, Any]=True , a : Dict=None , a : List[Any]=0 , a : Tuple="gelu" , **a : str , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : str = relative_attention
SCREAMING_SNAKE_CASE : Dict = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
SCREAMING_SNAKE_CASE : Tuple = position_biased_input
# Backwards compatibility
if type(a ) == str:
SCREAMING_SNAKE_CASE : Any = [x.strip() for x in pos_att_type.lower().split("|" )]
SCREAMING_SNAKE_CASE : Dict = pos_att_type
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : Any = kwargs.get("pooler_hidden_size" , a )
SCREAMING_SNAKE_CASE : int = pooler_dropout
SCREAMING_SNAKE_CASE : List[str] = pooler_hidden_act
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : Dict = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return 12
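    # In the upstream DeBERTa-v2 ONNX config this property is `default_onnx_opset`,
    # i.e. opset 12 is the minimum requested when exporting the model.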
def __UpperCamelCase ( self : Optional[Any] , a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a : int = -1 , a : int = -1 , a : int = -1 , a : bool = False , a : Optional["TensorType"] = None , a : int = 3 , a : int = 40 , a : int = 40 , a : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = super().generate_dummy_inputs(preprocessor=a , framework=a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs | 25 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
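    # Example invocation (the script file name and output path below are
    # illustrative only, not taken from this repository):
    #     python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation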
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path) | 25 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
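# A sketch of how the tester above is driven (mirroring the calls made by the
# test case below; the method names follow those call sites):
#     tester = DistilBertModelTester(self)
#     config_and_inputs = tester.prepare_config_and_inputs()
#     tester.create_and_check_distilbert_model(*config_and_inputs)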
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) ) | 25 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]
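# _import_structure maps submodule names to the public symbols they expose; the
# _LazyModule constructed at the bottom of this file uses it to defer importing
# the heavy torch-backed module until one of those symbols is first accessed.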
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 | 1 |
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path) | 25 |
from math import pi, sqrt, tan
def surface_area_cube(side_length):
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def surface_area_cuboid(length, breadth, height):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius):
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def surface_area_hemisphere(radius):
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def surface_area_cone(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1, radius_2, height):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius, tube_radius):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length, width):
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length):
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2
def area_triangle(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def area_triangle_three_sides(side_1, side_2, side_3):
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area
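# Worked check of Heron's formula above: a 3-4-5 right triangle has
# semi-perimeter (3 + 4 + 5) / 2 = 6, so area = sqrt(6 * 3 * 2 * 1) = 6.0:
#     assert area_triangle_three_sides(3, 4, 5) == 6.0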
def area_parallelogram(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def area_trapezium(base_1, base_2, height):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius):
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2
def area_ellipse(radius_x, radius_y):
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1, diagonal_2):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides, length):
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
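# Sanity check for the regular-polygon formula above: tan(pi / 4) == 1, so a
# regular 4-gon (a square) of side 10 gives (4 * 10**2) / 4 == 100.0, matching
# area_square(10).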
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''') | 25 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='gptsan-japanese'
lowerCamelCase__ =[
'past_key_values',
]
lowerCamelCase__ ={
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : int , a : Any=3_6000 , a : Optional[int]=1280 , a : Tuple=1024 , a : Optional[Any]=8192 , a : List[str]=4096 , a : Optional[Any]=128 , a : Optional[Any]=10 , a : List[Any]=0 , a : Union[str, Any]=16 , a : str=16 , a : Tuple=128 , a : List[str]=0.0 , a : Union[str, Any]=1e-5 , a : int=False , a : Optional[int]=0.0 , a : Union[str, Any]="float32" , a : str=False , a : Dict=False , a : Dict=False , a : Optional[Any]=0.002 , a : Tuple=False , a : List[str]=True , a : int=3_5998 , a : Union[str, Any]=3_5995 , a : int=3_5999 , **a : Optional[int] , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = d_model
SCREAMING_SNAKE_CASE : Union[str, Any] = d_ff
SCREAMING_SNAKE_CASE : Optional[Any] = d_ext
SCREAMING_SNAKE_CASE : List[str] = d_spout
SCREAMING_SNAKE_CASE : Optional[Any] = num_switch_layers
SCREAMING_SNAKE_CASE : str = num_ext_layers
SCREAMING_SNAKE_CASE : int = num_switch_layers + num_ext_layers
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Optional[int] = num_experts
SCREAMING_SNAKE_CASE : Optional[int] = expert_capacity
SCREAMING_SNAKE_CASE : List[str] = dropout_rate
SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : List[str] = router_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = router_jitter_noise
SCREAMING_SNAKE_CASE : Tuple = router_dtype
SCREAMING_SNAKE_CASE : Dict = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : Tuple = output_hidden_states
SCREAMING_SNAKE_CASE : Union[str, Any] = output_attentions
SCREAMING_SNAKE_CASE : str = initializer_factor
SCREAMING_SNAKE_CASE : Union[str, Any] = output_router_logits
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
super().__init__(
separator_token_id=a , pad_token_id=a , eos_token_id=a , **a , ) | 25 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class _UpperCamelCase :
'''simple docstring'''
lowerCamelCase__ =field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'The column name of the images in the files.'} )
lowerCamelCase__ =field(default=__A , metadata={'help': 'A folder containing the training data.'} )
lowerCamelCase__ =field(default=__A , metadata={'help': 'A folder containing the validation data.'} )
lowerCamelCase__ =field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowerCamelCase__ =field(
default=__A , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase__ =field(
default=__A , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if self.train_dir is not None:
SCREAMING_SNAKE_CASE : Dict = self.train_dir
if self.validation_dir is not None:
SCREAMING_SNAKE_CASE : Tuple = self.validation_dir
SCREAMING_SNAKE_CASE : Any = data_files if data_files else None
@dataclass
class _UpperCamelCase :
'''simple docstring'''
lowerCamelCase__ =field(
default=__A , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowerCamelCase__ =field(
default=__A , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowerCamelCase__ =field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase__ =field(default=__A , metadata={'help': 'Name or path of preprocessor config.'} )
lowerCamelCase__ =field(
default=__A , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase__ =field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowerCamelCase__ =field(
default=__A , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =field(
default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def lowerCamelCase__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , _a , _a)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : str = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.config_name , **_a)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_a)
else:
SCREAMING_SNAKE_CASE : List[Any] = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
})
# create image processor
if model_args.image_processor_name:
SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_a)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_a)
else:
SCREAMING_SNAKE_CASE : Any = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch")
SCREAMING_SNAKE_CASE : Optional[int] = ViTMAEForPreTraining(_a)
if training_args.do_train:
SCREAMING_SNAKE_CASE : Optional[Any] = ds["train"].column_names
else:
SCREAMING_SNAKE_CASE : List[Any] = ds["validation"].column_names
if data_args.image_column_name is not None:
SCREAMING_SNAKE_CASE : Optional[int] = data_args.image_column_name
elif "image" in column_names:
SCREAMING_SNAKE_CASE : Tuple = "image"
elif "img" in column_names:
SCREAMING_SNAKE_CASE : str = "img"
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE : List[Any] = image_processor.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
[
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
RandomResizedCrop(_a , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std),
])
    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset")
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : int = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
# Set the training transforms
ds["train"].set_transform(_a)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset")
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : List[str] = (
ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
ds["validation"].set_transform(_a)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
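        # Worked example of this linear scaling rule (numbers are illustrative):
        # with base_learning_rate=1e-3, per-device batch 32, gradient accumulation
        # 4 and world size 2, total_train_batch_size is 256 and the absolute lr is
        # 1e-3 * 256 / 256 = 1e-3.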
# Initialize our trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=_a , args=_a , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=_a , data_collator=_a , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : int = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : Any = last_checkpoint
SCREAMING_SNAKE_CASE : Optional[int] = trainer.train(resume_from_checkpoint=_a)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics)
trainer.save_metrics("train" , train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE : Dict = trainer.evaluate()
trainer.log_metrics("eval" , _a)
trainer.save_metrics("eval" , _a)
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE : List[str] = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_a)
else:
trainer.create_model_card(**_a)
def lowerCamelCase__ ( _a):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 25 |
from __future__ import annotations
def prime_factors(n):
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
return factors
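# Example of the trial division above: 360 == 2 * 2 * 2 * 3 * 3 * 5, so
#     prime_factors(360) == [2, 2, 2, 3, 3, 5]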
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images." )
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor | 25 |
from math import factorial, pi
def maclaurin_sin(theta, accuracy=30):
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos(theta, accuracy=30):
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
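# Both functions above sum truncated Maclaurin series after reducing theta
# modulo 2*pi: sin(x) = sum_r (-1)**r * x**(2r+1) / (2r+1)! and
# cos(x) = sum_r (-1)**r * x**(2r) / (2r)!.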
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15)) | 25 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =(PNDMScheduler,)
lowerCamelCase__ =(('num_inference_steps', 50),)
def __UpperCamelCase ( self : List[Any] , **a : Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a )
return config
def __UpperCamelCase ( self : Optional[int] , a : List[str]=0 , **a : int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("num_inference_steps" , a )
SCREAMING_SNAKE_CASE : str = self.dummy_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 * sample
SCREAMING_SNAKE_CASE : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(**a )
SCREAMING_SNAKE_CASE : Any = scheduler_class(**a )
scheduler.set_timesteps(a )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class.from_pretrained(a )
new_scheduler.set_timesteps(a )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : Optional[Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE : Dict = scheduler.step_prk(a , a , a , **a ).prev_sample
SCREAMING_SNAKE_CASE : int = new_scheduler.step_prk(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE : List[Any] = scheduler.step_plms(a , a , a , **a ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = new_scheduler.step_plms(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[str] , a : Optional[int]=0 , **a : Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("num_inference_steps" , a )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 * sample
SCREAMING_SNAKE_CASE : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**a )
scheduler.set_timesteps(a )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a )
SCREAMING_SNAKE_CASE : Any = scheduler_class.from_pretrained(a )
# copy over dummy past residuals
new_scheduler.set_timesteps(a )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE : int = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step_prk(a , a , a , **a ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = new_scheduler.step_prk(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE : str = scheduler.step_plms(a , a , a , **a ).prev_sample
SCREAMING_SNAKE_CASE : List[str] = new_scheduler.step_plms(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : Optional[int] , **a : int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config(**a )
SCREAMING_SNAKE_CASE : str = scheduler_class(**a )
SCREAMING_SNAKE_CASE : List[Any] = 10
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample_deter
scheduler.set_timesteps(a )
for i, t in enumerate(scheduler.prk_timesteps ):
SCREAMING_SNAKE_CASE : Union[str, Any] = model(a , a )
SCREAMING_SNAKE_CASE : Any = scheduler.step_prk(a , a , a ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
SCREAMING_SNAKE_CASE : Dict = model(a , a )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step_plms(a , a , a ).prev_sample
return sample
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : int = kwargs.pop("num_inference_steps" , a )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : str = scheduler_class(**a )
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample
SCREAMING_SNAKE_CASE : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps" ):
scheduler.set_timesteps(a )
elif num_inference_steps is not None and not hasattr(a , "set_timesteps" ):
SCREAMING_SNAKE_CASE : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE : Dict = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE : Any = scheduler.step_prk(a , 0 , a , **a ).prev_sample
SCREAMING_SNAKE_CASE : str = scheduler.step_prk(a , 1 , a , **a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step_plms(a , 0 , a , **a ).prev_sample
SCREAMING_SNAKE_CASE : int = scheduler.step_plms(a , 1 , a , **a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=a )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a )
SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**a )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
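        # Note the repeated values (851, 851, 801, 801, ...): PNDM's Runge-Kutta
        # warm-up ("prk") steps intentionally revisit some timesteps before the
        # linear multistep ("plms") phase begins.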
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=a , beta_end=a )
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=a )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 27
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : str = self.dummy_sample
SCREAMING_SNAKE_CASE : Tuple = 0.1 * sample
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler_class(**a )
scheduler.set_timesteps(a )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
SCREAMING_SNAKE_CASE : Dict = scheduler.step_prk(a , a , a ).prev_sample
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**a )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop()
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.full_loop(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE : List[str] = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.full_loop(set_alpha_to_one=a , beta_start=0.01 )
SCREAMING_SNAKE_CASE : List[str] = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.full_loop(set_alpha_to_one=a , beta_start=0.01 )
SCREAMING_SNAKE_CASE : Optional[int] = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE : Dict = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3 | 25 |
from __future__ import annotations
import math
class SegmentTree:
    '''simple docstring'''
    def __init__(self, size: int) -> None:
        """simple docstring"""
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx: int) -> int:
        """simple docstring"""
        return idx * 2
    def right(self, idx: int) -> int:
        """simple docstring"""
        return idx * 2 + 1
    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        """simple docstring"""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True
    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)
    def __str__(self) -> str:
        """simple docstring"""
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
    print(segt)
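# A minimal side check (illustration only, not from the original file): the
# 4 * size allocation above is the usual safe bound for a 1-indexed segment
# tree; the tight capacity is twice the next power of two, which never
# exceeds 4 * size.
def tree_capacity(size: int) -> int:
    pow2 = 1
    while pow2 < size:
        pow2 *= 2
    return 2 * pow2

assert tree_capacity(15) == 32 and tree_capacity(15) <= 4 * 15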
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, start, p, size_threshold, max_depth)
        start = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
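# A quick non-interactive check of the entry point above (illustration only):
assert sort([4, 1, 3, -2, 9, 0]) == [-2, 0, 1, 3, 4, 9]
assert sort(list(range(100, 0, -1))) == list(range(1, 101))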
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048]
        )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
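# Side note (illustration only): the expected-slice asserts above rely on
# seeded generation being reproducible; two torch generators with the same
# seed yield identical tensors.
import torch

g = torch.Generator().manual_seed(0)
h = torch.Generator().manual_seed(0)
assert torch.equal(torch.rand(3, generator=g), torch.rand(3, generator=h))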
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
        'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GLPNForDepthEstimation',
        'GLPNLayer',
        'GLPNModel',
        'GLPNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
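# A minimal sketch of the lazy-import pattern used above (illustration only;
# `MinimalLazyModule` is a made-up name, not the transformers implementation):
# attribute access triggers the real submodule import on first use.
import importlib
import types


class MinimalLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, exported in self._import_structure.items():
            if attr in exported:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")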
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
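# Side note (illustration only): slices such as `images[0, 255:258, 255:258, -1]`
# above read a 3x3 patch of the last channel from an NHWC batch.
import numpy as np

batch = np.zeros((1, 512, 512, 3))
assert batch[0, 255:258, 255:258, -1].shape == (3, 3)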
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
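# Example invocation (the script name and paths are hypothetical):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/lxmert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin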
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase__ ( _a):
return getitem, k
def lowerCamelCase__ ( _a , _a):
return setitem, k, v
def lowerCamelCase__ ( _a):
return delitem, k
def lowerCamelCase__ ( _a , _a , *_a):
try:
return fun(_a , *_a), None
except Exception as e:
return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def lowerCamelCase__ ( _a):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(_a):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def lowerCamelCase__ ( ):
    def is_public(name: str) -> bool:
        return not name.startswith("_")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
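# Side note (illustration only): `>` between sets is a strict-superset test,
# so the assert above requires dict to expose every public name HashMap does,
# plus at least one extra.
assert {"get", "items", "keys"} > {"get", "items"}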
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def lowerCamelCase__ ( _a):
    number = str(_a).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
    testmod()
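# Worked example (illustration only): the recursion emits the most significant
# bit first because the recursive call runs before the remainder is appended,
# e.g. 10 -> (5, 0) -> (2, 1) -> (1, 0) gives "1" + "0" + "1" + "0".
assert binary_recursive(10) == "1010"
assert binary_recursive(1) == "1"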
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
        'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VanForImageClassification',
        'VanModel',
        'VanPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , a : Optional[Any] , a : List[str]=13 , a : Union[str, Any]=7 , a : List[str]=True , a : str=True , a : Union[str, Any]=True , a : Optional[Any]=True , a : str=99 , a : str=[1, 1, 2] , a : Any=1 , a : str=32 , a : List[Any]=4 , a : Tuple=8 , a : Tuple=37 , a : Dict="gelu_new" , a : List[Any]=0.1 , a : Tuple=0.1 , a : Any=0.0 , a : Dict=512 , a : Any=3 , a : Dict=0.02 , a : str=3 , a : Union[str, Any]=4 , a : Union[str, Any]=None , a : Any=False , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE : Dict = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = block_sizes
SCREAMING_SNAKE_CASE : List[Any] = num_decoder_layers
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Any = n_head
SCREAMING_SNAKE_CASE : Tuple = d_head
SCREAMING_SNAKE_CASE : Dict = d_inner
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout
SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = activation_dropout
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : List[Any] = num_labels
SCREAMING_SNAKE_CASE : Optional[int] = num_choices
SCREAMING_SNAKE_CASE : Dict = scope
SCREAMING_SNAKE_CASE : Optional[int] = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE : Tuple = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE : Any = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE : int = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE : List[Any] = self.num_hidden_layers + 2
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCamelCase ( self : List[Any] , a : Optional[Any] , a : str , a : Tuple , a : List[Any] , a : int , a : Any , a : str , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = TFFunnelModel(config=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE : int = model(a )
SCREAMING_SNAKE_CASE : List[str] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : Optional[int] = model(a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : int = TFFunnelModel(config=a )
SCREAMING_SNAKE_CASE : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : List[str] = TFFunnelModel(config=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __UpperCamelCase ( self : Union[str, Any] , a : str , a : Optional[Any] , a : List[str] , a : List[Any] , a : int , a : List[Any] , a : Optional[Any] , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = TFFunnelBaseModel(config=a )
SCREAMING_SNAKE_CASE : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE : List[Any] = model(a )
SCREAMING_SNAKE_CASE : int = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : str = model(a )
SCREAMING_SNAKE_CASE : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : List[Any] = TFFunnelBaseModel(config=a )
SCREAMING_SNAKE_CASE : int = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = TFFunnelBaseModel(config=a )
SCREAMING_SNAKE_CASE : List[str] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __UpperCamelCase ( self : str , a : int , a : str , a : List[Any] , a : Optional[Any] , a : Tuple , a : Any , a : List[str] , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TFFunnelForPreTraining(config=a )
SCREAMING_SNAKE_CASE : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE : Dict = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : int , a : Tuple , a : str , a : List[Any] , a : str , a : Union[str, Any] , a : Any , a : Dict , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TFFunnelForMaskedLM(config=a )
SCREAMING_SNAKE_CASE : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE : str = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[str] , a : Dict , a : Union[str, Any] , a : Any , a : Any , a : Tuple , a : Tuple , a : Optional[int] , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = TFFunnelForSequenceClassification(config=a )
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE : Union[str, Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : Optional[Any] , a : Optional[int] , a : Dict , a : Union[str, Any] , a : Optional[Any] , a : Union[str, Any] , a : List[Any] , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE : List[str] = TFFunnelForMultipleChoice(config=a )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Any = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : int = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : List[str] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Tuple , a : Optional[Any] , a : Any , a : List[str] , a : str , a : Tuple , a : Dict , a : Optional[int] , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelForTokenClassification(config=a )
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Dict , a : Optional[Any] , a : Dict , a : Optional[int] , a : str , a : Any , a : Dict , a : List[Any] , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelForQuestionAnswering(config=a )
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE : List[str] = model(a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
def __UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
@require_tf
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*a )
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*a )
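# Arithmetic side check (illustration only): with the tester defaults
# block_sizes=[1, 1, 2] and num_decoder_layers=1, the non-base model above
# reports sum(block_sizes) + num_decoder_layers hidden layers.
assert sum([1, 1, 2]) + 1 == 5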
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
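# Side note (illustration only): with one bucket per unit of value range,
# floats sharing an integer offset land in the same bucket and are ordered by
# the final sorted() pass.
assert bucket_sort([0.5, 0.1, 2.0, 1.5]) == [0.1, 0.5, 1.5, 2.0]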
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
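# Spot check (illustration only): the first 3-digit Fibonacci number under
# this indexing is fibonacci(12) == 144, so fibonacci_digits_index(3) is 12;
# solution(1000) is then expected to give the Project Euler 25 answer, 4782.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12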
a_ = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset([])
a_ = frozenset(['image'])
a_ = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image'])
a_ = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'negative_prompt'])
a_ = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
a_ = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image', 'mask_image'])
a_ = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['example_image', 'image', 'mask_image'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset(['input_tokens'])
a_ = frozenset(['input_tokens'])
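# A minimal sketch of how such parameter sets are typically consumed in tests
# (illustration only; `unsupported_params` is a made-up helper): check the
# expected arguments against a pipeline's __call__ signature.
import inspect


def unsupported_params(pipeline_call, expected_params: frozenset) -> set:
    accepted = set(inspect.signature(pipeline_call).parameters)
    return set(expected_params) - accepted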
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =42
class _UpperCamelCase ( __A , __A ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , a : int = 32 , a : int = 64 , a : int = 20 , a : int = 768 , a : int=77 , a : str=4 , a : float = 0.0 , a : str = "silu" , a : Optional[str] = None , a : Optional[str] = None , a : Optional[str] = "linear" , a : Optional[str] = "prd" , a : Optional[int] = None , a : Optional[int] = None , a : Optional[int] = None , ) -> int:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = attention_head_dim
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim
SCREAMING_SNAKE_CASE : Any = additional_embeddings
SCREAMING_SNAKE_CASE : int = time_embed_dim or inner_dim
SCREAMING_SNAKE_CASE : List[Any] = embedding_proj_dim or embedding_dim
SCREAMING_SNAKE_CASE : Any = clip_embed_dim or embedding_dim
SCREAMING_SNAKE_CASE : Any = Timesteps(a , a , 0 )
SCREAMING_SNAKE_CASE : Dict = TimestepEmbedding(a , a , out_dim=a , act_fn=a )
SCREAMING_SNAKE_CASE : str = nn.Linear(a , a )
if embedding_proj_norm_type is None:
SCREAMING_SNAKE_CASE : Dict = None
elif embedding_proj_norm_type == "layer":
SCREAMING_SNAKE_CASE : Tuple = nn.LayerNorm(a )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
SCREAMING_SNAKE_CASE : str = nn.Linear(a , a )
if encoder_hid_proj_type is None:
SCREAMING_SNAKE_CASE : Tuple = None
elif encoder_hid_proj_type == "linear":
SCREAMING_SNAKE_CASE : Any = nn.Linear(a , a )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , a ) )
if added_emb_type == "prd":
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.zeros(1 , 1 , a ) )
elif added_emb_type is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(
[
BasicTransformerBlock(
a , a , a , dropout=a , activation_fn="gelu" , attention_bias=a , )
for d in range(a )
] )
if norm_in_type == "layer":
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.LayerNorm(a )
elif norm_in_type is None:
SCREAMING_SNAKE_CASE : Optional[int] = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.LayerNorm(a )
SCREAMING_SNAKE_CASE : Tuple = nn.Linear(a , a )
SCREAMING_SNAKE_CASE : Tuple = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
SCREAMING_SNAKE_CASE : Dict = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , a , persistent=a )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.zeros(1 , a ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.zeros(1 , a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = {}
def fn_recursive_add_processors(a : str , a : torch.nn.Module , a : Dict[str, AttentionProcessor] ):
if hasattr(a , "set_processor" ):
SCREAMING_SNAKE_CASE : int = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , a , a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(a , a , a )
return processors
def __UpperCamelCase ( self : Tuple , a : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(a , a ) and len(a ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(a )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(a : str , a : torch.nn.Module , a : int ):
if hasattr(a , "set_processor" ):
if not isinstance(a , a ):
module.set_processor(a )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , a , a )
for name, module in self.named_children():
fn_recursive_attn_processor(a , a , a )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def __UpperCamelCase ( self : Optional[Any] , a : Union[str, Any] , a : Union[torch.Tensor, float, int] , a : torch.FloatTensor , a : Optional[torch.FloatTensor] = None , a : Optional[torch.BoolTensor] = None , a : bool = True , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = hidden_states.shape[0]
SCREAMING_SNAKE_CASE : Any = timestep
if not torch.is_tensor(a ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(a ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : List[str] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE : Optional[int] = timesteps * torch.ones(a , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE : Tuple = self.time_proj(a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
SCREAMING_SNAKE_CASE : List[Any] = timesteps_projected.to(dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.time_embedding(a )
if self.embedding_proj_norm is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.embedding_proj_norm(a )
SCREAMING_SNAKE_CASE : str = self.embedding_proj(a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.encoder_hidden_states_proj(a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.positional_embedding.to(hidden_states.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = 0
if encoder_hidden_states is not None:
additional_embeds.append(a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
SCREAMING_SNAKE_CASE : List[Any] = hidden_states[:, None, :]
SCREAMING_SNAKE_CASE : Optional[Any] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
SCREAMING_SNAKE_CASE : Any = self.prd_embedding.to(hidden_states.dtype ).expand(a , -1 , -1 )
additional_embeds.append(a )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(
a , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
SCREAMING_SNAKE_CASE : str = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
SCREAMING_SNAKE_CASE : Any = F.pad(
a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
SCREAMING_SNAKE_CASE : List[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
SCREAMING_SNAKE_CASE : int = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
SCREAMING_SNAKE_CASE : Optional[int] = F.pad(a , (0, self.additional_embeddings) , value=0.0 )
SCREAMING_SNAKE_CASE : Optional[int] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
SCREAMING_SNAKE_CASE : Optional[int] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm_in(a )
for block in self.transformer_blocks:
SCREAMING_SNAKE_CASE : str = block(a , attention_mask=a )
SCREAMING_SNAKE_CASE : List[str] = self.norm_out(a )
if self.prd_embedding is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states[:, -1]
else:
SCREAMING_SNAKE_CASE : Any = hidden_states[:, additional_embeddings_len:]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.proj_to_clip_embeddings(a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=a )
def __UpperCamelCase ( self : str , a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
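# A minimal illustration (not part of the model code) of the additive causal
# mask built in __init__ above: torch.full seeds every entry with a large
# negative value and triu_(1) zeroes the diagonal and below, so adding the
# mask to attention scores suppresses only future positions.
import torch

mask = torch.full([4, 4], -10000.0)
mask.triu_(1)
assert mask[0, 0] == 0.0 and mask[1, 0] == 0.0 and mask[0, 1] == -10000.0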
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        """simple docstring"""
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable with either `pickle` or `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # use a module-level global, since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
return column
    def _tensorize(self, value):
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Dict = np.asarray(a )
        # use a module-level global, since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        """simple docstring"""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        """simple docstring"""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def __UpperCamelCase ( self : Dict , pa_table : pa.Table ) -> Mapping:
        """simple docstring"""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)
    def __UpperCamelCase ( self : Optional[int] , pa_table : pa.Table ) -> "jax.Array":
        """simple docstring"""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column
    def __UpperCamelCase ( self : List[Any] , pa_table : pa.Table ) -> Mapping:
        """simple docstring"""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
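# A minimal illustration of the precision note in the comments above: unless
# JAX's x64 mode is enabled, jax.numpy downcasts 64-bit numpy input.
import jax.numpy as jnp
import numpy as np

x = jnp.array(np.arange(3, dtype=np.int64))
print(x.dtype)  # int32 by default; int64 when jax_enable_x64 is set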
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(a , "num_attention_heads" ) )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , a : List[str] , a : List[str]=13 , a : Union[str, Any]=64 , a : List[Any]=3 , a : Dict=3 , a : int=2 , a : str=1 , a : str=16 , a : Union[str, Any]=[128, 256, 384] , a : int=[4, 6, 8] , a : List[Any]=[2, 3, 4] , a : Dict=[16, 16, 16] , a : Union[str, Any]=0 , a : Any=[2, 2, 2] , a : Tuple=[2, 2, 2] , a : Optional[Any]=0.02 , a : Optional[int]=True , a : List[str]=True , a : Dict=2 , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = stride
SCREAMING_SNAKE_CASE : Optional[Any] = padding
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_sizes
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : Dict = key_dim
SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : int = attention_ratio
SCREAMING_SNAKE_CASE : int = mlp_ratio
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : Any = initializer_range
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __UpperCamelCase ( self : int , a : Any , a : Tuple , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = LevitModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a )
SCREAMING_SNAKE_CASE : List[str] = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __UpperCamelCase ( self : Any , a : List[Any] , a : Union[str, Any] , a : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = LevitForImageClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not output attentions" )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
def check_hidden_states_output(a : Dict , a : List[Any] , a : str ):
SCREAMING_SNAKE_CASE : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.hidden_states
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths ) + 1
self.assertEqual(len(a ) , a )
SCREAMING_SNAKE_CASE : List[Any] = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : Optional[int] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE : Any = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(a , a , a )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(a , a , a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple , a : int , a : str , a : List[str]=False ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE : str = model_class(a )
model.to(a )
model.train()
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(a , a , return_labels=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**a ).loss
loss.backward()
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : str = True
for model_class in self.all_model_classes:
if model_class in get_values(a ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE : Any = model_class(a )
model.gradient_checkpointing_enable()
model.to(a )
model.train()
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(a , a , return_labels=a )
SCREAMING_SNAKE_CASE : Dict = model(**a ).loss
loss.backward()
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[int] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE : str = problem_type["title"]
SCREAMING_SNAKE_CASE : List[str] = problem_type["num_labels"]
SCREAMING_SNAKE_CASE : str = model_class(a )
model.to(a )
model.train()
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(a , a , return_labels=a )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a ) as warning_list:
SCREAMING_SNAKE_CASE : List[Any] = model(**a ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Dict = LevitModel.from_pretrained(a )
self.assertIsNotNone(a )
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
a )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([1.0448, -0.3745, -1.8317] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) | 25 |
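# The height/width recurrence in the Levit tester above is the standard
# convolution output-size formula, floor((n + 2*padding - kernel_size) /
# stride) + 1, applied once per downsampling stage. A minimal stand-alone
# check of that formula; the concrete values 64/3/2/1 below are illustrative
# assumptions, not values read from the tester's config:
from math import floor

def conv_output_size(n, kernel_size, stride, padding):
    # one convolution stage: floor((n + 2p - k) / s) + 1
    return floor((n + 2 * padding - kernel_size) / stride) + 1

n = 64
for _ in range(4):  # four stride-2 stages, mirroring the loop in the tester
    n = conv_output_size(n, kernel_size=3, stride=2, padding=1)
assert n == 4  # 64 -> 32 -> 16 -> 8 -> 4, i.e. ceil(64 / 16)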
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *a : str , **a : int ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] , a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
import datasets
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : Dict = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE : Tuple = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@require_torch
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE : Dict = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Tuple = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 0.9985
SCREAMING_SNAKE_CASE : int = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : List[str] = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE : Dict = 0.9993
SCREAMING_SNAKE_CASE : str = pipeline("object-detection" , model=a , threshold=a )
SCREAMING_SNAKE_CASE : List[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , ) | 25 | 1 |
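# The threshold variants above depend on one behaviour of the pipeline: any
# detection whose score falls below `threshold` is dropped from the output. A
# tiny self-contained sketch of that post-processing step (the dictionaries
# below are illustrative, not real model outputs):
detections = [
    {"score": 0.9988, "label": "cat"},
    {"score": 0.9960, "label": "remote"},
    {"score": 0.9955, "label": "couch"},
]
threshold = 0.9985
kept = [d for d in detections if d["score"] >= threshold]
assert [d["label"] for d in kept] == ["cat"]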
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
a_ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
a_ = concatenate_datasets
a_ = DownloadConfig
a_ = DownloadManager
a_ = DownloadMode
a_ = DownloadConfig
a_ = DownloadMode
a_ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 25 |
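# The import-time guards above compare parsed versions rather than raw
# strings, so e.g. "3.10" correctly sorts after "3.7". A minimal sketch of the
# same idiom; the helper name and the values passed to it are illustrative,
# not part of the file above:
from packaging import version

def _require_min_version(current, minimum, name):
    # version.parse gives numeric ordering, unlike plain string comparison
    if version.parse(current) < version.parse(minimum):
        raise ImportWarning(f"{name}>={minimum} is required, found {current}.")

_require_min_version("8.0.0", "8.0.0", "pyarrow")  # no-op: requirement met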
def lowerCamelCase__ ( _a):
    if not isinstance(_a , int):
SCREAMING_SNAKE_CASE : Tuple = f"Input value of [number={number}] must be an integer"
raise TypeError(_a)
if number < 0:
return False
SCREAMING_SNAKE_CASE : Union[str, Any] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 | 1 |
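# The function above (masked names aside) tests whether a number is
# automorphic: its square ends in the number's own digits, e.g. 76**2 == 5776.
# A readable restatement of the same digit-by-digit comparison, as a
# stand-alone sketch:
def is_automorphic_sketch(number):
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        return False
    square = number * number
    while number > 0:
        # compare the trailing digit of the number and of its square
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert is_automorphic_sketch(76)      # 5776 ends in 76
assert not is_automorphic_sketch(7)   # 49 does not end in 7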
import math
def lowerCamelCase__ ( _a):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers and multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_a) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCamelCase__ ( _a = 0.1):
SCREAMING_SNAKE_CASE : int = 3
SCREAMING_SNAKE_CASE : List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(_a)
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 25 |
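# The pair above is Project Euler problem 58: trial division restricted to
# candidates of the form 6k +/- 1, plus a walk over the three non-square
# corners of each Ulam-spiral layer (j*j + m*(j + 1) for m in 1..3) until the
# fraction of primes on the diagonals drops below `ratio`. A stand-alone
# sanity check of the 6k +/- 1 trial-division idea:
import math

def _is_prime_sketch(n):
    if 1 < n < 4:
        return True  # 2 and 3
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(n)) + 1, 6):  # test only 6k-1 and 6k+1
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

assert [_is_prime_sketch(n) for n in (2, 3, 4, 25, 29)] == [True, True, False, False, True]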
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) ) | 25 | 1 |
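# The GPU test above traces the model with TorchScript and reloads it from
# disk; the same save/load round trip works on CPU with any traceable module.
# A tiny self-contained sketch (the Linear layer is a stand-in, not
# DistilBert):
import os
import tempfile
import torch

toy = torch.nn.Linear(4, 2)
example = torch.randn(1, 4)
traced = torch.jit.trace(toy, example)
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    reloaded = torch.jit.load(path)
    assert torch.allclose(reloaded(example), traced(example))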
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 25 |
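# The module above follows the transformers lazy-import pattern: a dict maps
# submodule names to exported symbols, the TYPE_CHECKING branch satisfies
# static analyzers, and at runtime the module is swapped for a proxy that
# imports on first attribute access. A minimal stdlib-only sketch of the idea
# (the class below is hypothetical, not the real transformers _LazyModule):
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # reverse map: exported symbol -> submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        submodule = self._symbol_to_module.get(symbol)
        if submodule is None:
            raise AttributeError(symbol)
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, symbol)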
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure) | 25 | 1 |
from __future__ import annotations
def lowerCamelCase__ ( _a , _a = None , _a = None):
if start is None:
SCREAMING_SNAKE_CASE : List[str] = 0
if end is None:
SCREAMING_SNAKE_CASE : List[Any] = len(_a) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE : Any = (start + end) // 2
slowsort(_a , _a , _a)
slowsort(_a , mid + 1 , _a)
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = sequence[mid], sequence[end]
slowsort(_a , _a , end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod() | 25 |
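# Slowsort (from Broder and Stolfi's "Pessimal Algorithms" paper) is a
# deliberately inefficient but correct sort: sort both halves recursively,
# move the larger of the two sentinels to the end, then re-sort everything
# except the last element. A readable, runnable restatement of the masked
# function above:
def slowsort_sketch(seq, start=0, end=None):
    if end is None:
        end = len(seq) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort_sketch(seq, start, mid)
    slowsort_sketch(seq, mid + 1, end)
    if seq[end] < seq[mid]:
        seq[mid], seq[end] = seq[end], seq[mid]
    slowsort_sketch(seq, start, end - 1)

data = [5, 1, 4, 2, 8, 0]
slowsort_sketch(data)
assert data == [0, 1, 2, 4, 5, 8]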
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Any = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" , return_dict=a )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "google/ncsnpp-celebahq-256"
SCREAMING_SNAKE_CASE : List[Any] = UNetaDModel.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Optional[Any] = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe(num_inference_steps=20 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 25 | 1 |
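# Both pipeline calls above re-seed with torch.manual_seed(0), which returns
# the re-seeded default CPU generator, so the dict and tuple code paths draw
# identical noise and must yield identical images. A minimal demonstration of
# that reproducibility property:
import torch

first = torch.randn(3, generator=torch.manual_seed(0))
second = torch.randn(3, generator=torch.manual_seed(0))
assert torch.equal(first, second)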
from __future__ import annotations
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Tuple = len(_a) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
SCREAMING_SNAKE_CASE : Optional[int] = i + 1
else:
SCREAMING_SNAKE_CASE : List[str] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''') | 25 |
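# Note that the inward-moving two-pointer scan above is only correct when
# `nums` is sorted ascending (as in the [2, 7, 11, 15] demo); on unsorted
# input the pointers can step past a valid pair. A restatement with that
# assumption spelled out:
def two_sum_sorted(nums, target):
    i, j = 0, len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1  # need a larger sum: advance the left pointer
        else:
            j -= 1  # need a smaller sum: retreat the right pointer
    return []

assert two_sum_sorted([2, 7, 11, 15], 9) == [0, 1]
assert two_sum_sorted([1, 3, 5], 100) == []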
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Optional[int] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[int] = 0
while b > 0:
if b & 1:
SCREAMING_SNAKE_CASE : Optional[Any] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res | 25 | 1 |
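# Both functions above are Russian-peasant (binary) multiplication: `a` is
# doubled once per bit of `b` and added into the result whenever the current
# low bit of `b` is set, so a*b needs only O(log b) additions. In the modular
# variant only the accumulator is reduced; reducing the doubled operand as
# well (a hypothetical tweak, shown below) keeps every intermediate below c,
# which matters in fixed-width languages even though Python ints never
# overflow:
def binary_mod_multiply_sketch(a, b, c):
    res = 0
    while b > 0:
        if b & 1:
            res = (res + a) % c
        a = (a + a) % c  # keep the doubled operand reduced too
        b >>= 1
    return res

assert binary_mod_multiply_sketch(7, 13, 5) == (7 * 13) % 5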
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =StableDiffusionInstructPixaPixPipeline
lowerCamelCase__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowerCamelCase__ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase__ =IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ =IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE : Optional[Any] = PNDMScheduler(skip_prk_steps=a )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextModel(a )
SCREAMING_SNAKE_CASE : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __UpperCamelCase ( self : Tuple , a : str , a : Dict=0 ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
SCREAMING_SNAKE_CASE : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.fromarray(np.uinta(a ) ).convert("RGB" )
if str(a ).startswith("mps" ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(a )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=a ).manual_seed(a )
SCREAMING_SNAKE_CASE : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a )
SCREAMING_SNAKE_CASE : Any = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**a ).images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline(**a )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE : Tuple = "french fries"
SCREAMING_SNAKE_CASE : Dict = sd_pipe(**a , negative_prompt=a )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionInstructPixaPixPipeline(**a )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE : int = [inputs["prompt"]] * 2
SCREAMING_SNAKE_CASE : List[str] = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE : str = torch.from_numpy(a ).unsqueeze(0 ).to(a )
SCREAMING_SNAKE_CASE : Optional[int] = image / 2 + 0.5
SCREAMING_SNAKE_CASE : str = image.permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE : int = image.repeat(2 , 1 , 1 , 1 )
SCREAMING_SNAKE_CASE : Dict = sd_pipe(**a ).images
SCREAMING_SNAKE_CASE : str = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[str] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : int = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" )
SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**a )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe(**a ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE : Any = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __UpperCamelCase ( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**a )
SCREAMING_SNAKE_CASE : str = VaeImageProcessor(do_resize=a , do_normalize=a )
SCREAMING_SNAKE_CASE : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs_by_type(a , input_image_type="pt" ) )[0]
SCREAMING_SNAKE_CASE : Optional[int] = components["vae"]
SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs_by_type(a , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
SCREAMING_SNAKE_CASE : List[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
SCREAMING_SNAKE_CASE : List[Any] = pipe(**a )[0]
SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(a , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any , a : Union[str, Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(a )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
SCREAMING_SNAKE_CASE : Tuple = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs()
SCREAMING_SNAKE_CASE : List[Any] = pipe(**a ).images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=a )
SCREAMING_SNAKE_CASE : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : int = self.get_inputs()
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**a ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=a )
SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : int = self.get_inputs()
SCREAMING_SNAKE_CASE : Any = pipe(**a ).images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def callback_fn(a : int , a : int , a : torch.FloatTensor ) -> None:
SCREAMING_SNAKE_CASE : str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE : int = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[str] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
SCREAMING_SNAKE_CASE : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE : str = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[str] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=a , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : int = self.get_inputs()
pipe(**a , callback=a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=a , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : List[str] = self.get_inputs()
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(**a )
SCREAMING_SNAKE_CASE : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE : List[str] = inputs["image"].resize((504, 504) )
SCREAMING_SNAKE_CASE : List[Any] = "timbrooks/instruct-pix2pix"
SCREAMING_SNAKE_CASE : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
a , safety_checker=a , )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : List[Any] = pipe(**a )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
SCREAMING_SNAKE_CASE : Tuple = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 | 25 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='roformer'
def __init__( self : Dict , a : Any=5_0000 , a : List[Any]=None , a : str=768 , a : str=12 , a : Tuple=12 , a : Optional[Any]=3072 , a : List[str]="gelu" , a : List[Any]=0.1 , a : Union[str, Any]=0.1 , a : Tuple=1536 , a : List[str]=2 , a : Tuple=0.02 , a : Any=1e-12 , a : Optional[int]=0 , a : Union[str, Any]=False , a : int=True , **a : str , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = rotary_value
SCREAMING_SNAKE_CASE : int = use_cache
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "sequence"}
SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
            ] )
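# --- usage sketch, not part of the original file ---
# A minimal, hedged example of consuming the configuration defined above via
# the public `transformers` export `RoFormerConfig` (assumed available).
from transformers import RoFormerConfig
roformer_config = RoFormerConfig(vocab_size=5_0000, hidden_size=768, num_attention_heads=12)
print(roformer_config.model_type)  # "roformer"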
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
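# --- illustration, not part of the original file ---
# Sketch of what the _LazyModule registration above buys: nothing heavy is
# imported until an attribute is first accessed (assuming `transformers`
# ships this module under the same path).
import importlib
lazy_mod = importlib.import_module("transformers.models.roberta_prelayernorm")
config_cls = getattr(lazy_mod, "RobertaPreLayerNormConfig")  # triggers the real import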
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a_ = logging.getLogger(__name__)
a_ = 'Hello world! cécé herlolip'
a_ = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : List[Any] = BertAbsConfig(
temp_dir="." , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    SCREAMING_SNAKE_CASE : Dict = torch.load(_a , lambda storage, loc: storage)
SCREAMING_SNAKE_CASE : str = AbsSummarizer(_a , torch.device("cpu") , _a)
original.eval()
SCREAMING_SNAKE_CASE : List[str] = BertAbsSummarizer(_a , torch.device("cpu"))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
SCREAMING_SNAKE_CASE : List[str] = BertTokenizer.from_pretrained("bert-base-uncased")
# prepare the model inputs
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("This is sample éàalj'-.")
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("This is sample 3 éàalj'-.")
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a)))
SCREAMING_SNAKE_CASE : int = torch.tensor(_a).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
SCREAMING_SNAKE_CASE : List[Any] = encoder_input_ids
SCREAMING_SNAKE_CASE : List[Any] = decoder_input_ids
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE : Optional[int] = original(_a , _a , _a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Dict = original.generator(_a)
SCREAMING_SNAKE_CASE : Any = new_model(
_a , _a , _a , _a , _a)[0]
SCREAMING_SNAKE_CASE : Tuple = new_model.generator(_a)
SCREAMING_SNAKE_CASE : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_a))
SCREAMING_SNAKE_CASE : int = torch.allclose(_a , _a , atol=1E-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
a_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
    )
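# Example invocation of the conversion script above (hypothetical script and
# checkpoint paths; adjust to where the official BertAbs dump lives):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./bertabs-finetuned-cnndm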
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 1
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : int = (32, 32)
SCREAMING_SNAKE_CASE : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a )
return image
@property
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(a )
@property
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
def extract(*a : int , **a : Union[str, Any] ):
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones([0] )
def __UpperCamelCase ( self : str , a : List[Any] ) -> List[str]:
"""simple docstring"""
self.pixel_values.to(a )
return self
return Out()
return extract
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : int = self.dummy_cond_unet
SCREAMING_SNAKE_CASE : Dict = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a , set_alpha_to_one=a , )
SCREAMING_SNAKE_CASE : Tuple = self.dummy_vae
SCREAMING_SNAKE_CASE : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        # assemble the pipeline (this test uses the DDIM scheduler created above)
SCREAMING_SNAKE_CASE : int = StableDiffusionPipeline(
unet=a , scheduler=a , vae=a , text_encoder=a , tokenizer=a , safety_checker=a , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE : List[str] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=a ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = sd_pipe([prompt] , generator=a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=a ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = sd_pipe(
[prompt] , generator=a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a , )[0]
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : List[str] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE : Tuple = PNDMScheduler(skip_prk_steps=a )
SCREAMING_SNAKE_CASE : Dict = self.dummy_vae
SCREAMING_SNAKE_CASE : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionPipeline(
unet=a , scheduler=a , vae=a , text_encoder=a , tokenizer=a , safety_checker=a , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=a ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe([prompt] , generator=a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
SCREAMING_SNAKE_CASE : int = output.images
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=a ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = sd_pipe(
[prompt] , generator=a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a , )[0]
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=a )
assert isinstance(a , a )
assert isinstance(pipe.scheduler , a )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE : Tuple = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionPipeline.from_pretrained(a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE : Dict = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.dummy_cond_unet
SCREAMING_SNAKE_CASE : Dict = PNDMScheduler(skip_prk_steps=a )
SCREAMING_SNAKE_CASE : Dict = self.dummy_vae
SCREAMING_SNAKE_CASE : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
SCREAMING_SNAKE_CASE : Tuple = unet.half()
SCREAMING_SNAKE_CASE : Tuple = vae.half()
SCREAMING_SNAKE_CASE : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline(
unet=a , scheduler=a , vae=a , text_encoder=a , tokenizer=a , safety_checker=a , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Tuple = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a )
SCREAMING_SNAKE_CASE : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[int] = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
SCREAMING_SNAKE_CASE : Union[str, Any] = 40_0366_0346
SCREAMING_SNAKE_CASE : Tuple = 7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(a )
SCREAMING_SNAKE_CASE : Dict = sd_pipe(
[prompt] , generator=a , guidance_scale=a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(a )
SCREAMING_SNAKE_CASE : str = sd_pipe(
[prompt] , generator=a , guidance_scale=a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE : Any = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = "padme amidala taking a bath artwork, safe for work, no nudity"
SCREAMING_SNAKE_CASE : Optional[int] = 27_3497_1755
SCREAMING_SNAKE_CASE : Optional[Any] = 7
SCREAMING_SNAKE_CASE : str = torch.manual_seed(a )
SCREAMING_SNAKE_CASE : Dict = sd_pipe(
[prompt] , generator=a , guidance_scale=a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
SCREAMING_SNAKE_CASE : int = torch.manual_seed(a )
SCREAMING_SNAKE_CASE : List[str] = sd_pipe(
[prompt] , generator=a , guidance_scale=a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
SCREAMING_SNAKE_CASE : List[str] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
SCREAMING_SNAKE_CASE : str = 10_4435_5234
SCREAMING_SNAKE_CASE : List[str] = 12
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(a )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(
[prompt] , generator=a , guidance_scale=a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(a )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
[prompt] , generator=a , guidance_scale=a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Tuple = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )
    imgaimg.save_pretrained(args.dump_path)
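# Example invocation of the script above (hypothetical script name and output
# path; the txt2img checkpoint shown is the script's own default):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --dump_path ./karlo-image-variations \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha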
from pathlib import Path
import fire
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[int] = Path(_a)
SCREAMING_SNAKE_CASE : Dict = Path(_a)
dest_dir.mkdir(exist_ok=_a)
for path in src_dir.iterdir():
SCREAMING_SNAKE_CASE : List[Any] = [x.rstrip() for x in list(path.open().readlines())][:n]
SCREAMING_SNAKE_CASE : str = dest_dir.joinpath(path.name)
print(_a)
dest_path.open("w").write("\n".join(_a))
if __name__ == "__main__":
    fire.Fire(minify)
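# Example invocation via python-fire (hypothetical directories): keep the
# first 100 lines of every file under ./wmt_en_ro and write the truncated
# copies to ./wmt_en_ro_mini.
#   python minify_dataset.py ./wmt_en_ro ./wmt_en_ro_mini 100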
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from string import ascii_uppercase
a_ = {str(ord(c) - 55): c for c in ascii_uppercase}
def lowerCamelCase__ ( _a , _a):
if isinstance(_a , _a):
raise TypeError("int() can't convert non-string with explicit base")
if num < 0:
raise ValueError("parameter must be positive int")
if isinstance(_a , _a):
raise TypeError("'str' object cannot be interpreted as an integer")
if isinstance(_a , _a):
raise TypeError("'float' object cannot be interpreted as an integer")
if base in (0, 1):
raise ValueError("base must be >= 2")
if base > 36:
raise ValueError("base must be <= 36")
SCREAMING_SNAKE_CASE : Union[str, Any] = ""
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 0
while div != 1:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = divmod(_a , _a)
if base >= 11 and 9 < mod < 36:
SCREAMING_SNAKE_CASE : Dict = ALPHABET_VALUES[str(_a)]
else:
SCREAMING_SNAKE_CASE : Dict = str(_a)
new_value += actual_value
SCREAMING_SNAKE_CASE : List[Any] = num // base
SCREAMING_SNAKE_CASE : Tuple = div
if div == 0:
return str(new_value[::-1])
elif div == 1:
new_value += str(_a)
return str(new_value[::-1])
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
            )
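# Worked example of the repeated-division conversion implemented above:
# 255 in base 16 -> divmod(255, 16) = (15, 15), digit "F";
#                   divmod(15, 16)  = (0, 15),  digit "F";
# reading the collected digits in reverse gives "FF".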
from math import pi, sqrt, tan
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values")
return 6 * side_length**2
def lowerCamelCase__ ( _a , _a , _a):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values")
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values")
return 4 * pi * radius**2
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values")
return 3 * pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values")
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase__ ( _a , _a , _a):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values")
SCREAMING_SNAKE_CASE : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values")
return 2 * pi * radius * (height + radius)
def lowerCamelCase__ ( _a , _a):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values")
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori")
return 4 * pow(_a , 2) * torus_radius * tube_radius
def lowerCamelCase__ ( _a , _a):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values")
return length * width
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values")
return side_length**2
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values")
return (base * height) / 2
def lowerCamelCase__ ( _a , _a , _a):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values")
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle")
SCREAMING_SNAKE_CASE : List[str] = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE : Optional[int] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea))
return area
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values")
return base * height
def lowerCamelCase__ ( _a , _a , _a):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values")
return 1 / 2 * (basea + basea) * height
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values")
return pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values")
return pi * radius_x * radius_y
def lowerCamelCase__ ( _a , _a):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values")
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase__ ( _a , _a):
if not isinstance(_a , _a) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side")
return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
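# Worked example of Heron's formula used by area_triangle_three_sides above:
# sides 5, 12, 13 give semi-perimeter s = (5 + 12 + 13) / 2 = 15, and
# area = sqrt(15 * (15 - 5) * (15 - 12) * (15 - 13)) = sqrt(900) = 30.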
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[Any] = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
SCREAMING_SNAKE_CASE : str = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
SCREAMING_SNAKE_CASE : str = f"{src_lang}-{tgt_lang}"
SCREAMING_SNAKE_CASE : Tuple = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(_a , exist_ok=_a)
SCREAMING_SNAKE_CASE : Any = os.path.join(_a , "README.md")
print(f"Generating {path}")
with open(_a , "w" , encoding="utf-8") as f:
f.write(_a)
# make sure we are under the root of the project
a_ = Path(__file__).resolve().parent.parent.parent
a_ = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ , a_ , a_ = model_name.split('-')
a_ = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
a_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='tapas'
def __init__( self : int , a : Optional[Any]=3_0522 , a : Optional[Any]=768 , a : Dict=12 , a : str=12 , a : str=3072 , a : Optional[int]="gelu" , a : Optional[Any]=0.1 , a : Any=0.1 , a : List[str]=1024 , a : str=[3, 256, 256, 2, 256, 256, 10] , a : Tuple=0.02 , a : List[Any]=1e-12 , a : Tuple=0 , a : int=10.0 , a : Optional[Any]=0 , a : Optional[int]=1.0 , a : Optional[int]=None , a : List[Any]=1.0 , a : Optional[int]=False , a : int=None , a : Optional[int]=1.0 , a : List[Any]=1.0 , a : List[str]=False , a : int=False , a : Any="ratio" , a : Tuple=None , a : Optional[int]=None , a : List[str]=64 , a : str=32 , a : Union[str, Any]=False , a : Optional[Any]=True , a : Union[str, Any]=False , a : List[Any]=False , a : Optional[int]=True , a : int=False , a : List[Any]=None , a : Optional[int]=None , **a : int , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=a , **a )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_sizes
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
# Fine-tuning task hyperparameters
SCREAMING_SNAKE_CASE : Union[str, Any] = positive_label_weight
SCREAMING_SNAKE_CASE : Union[str, Any] = num_aggregation_labels
SCREAMING_SNAKE_CASE : int = aggregation_loss_weight
SCREAMING_SNAKE_CASE : List[Any] = use_answer_as_supervision
SCREAMING_SNAKE_CASE : List[str] = answer_loss_importance
SCREAMING_SNAKE_CASE : Optional[int] = use_normalized_answer_loss
SCREAMING_SNAKE_CASE : Dict = huber_loss_delta
SCREAMING_SNAKE_CASE : List[str] = temperature
SCREAMING_SNAKE_CASE : int = aggregation_temperature
SCREAMING_SNAKE_CASE : Optional[int] = use_gumbel_for_cells
SCREAMING_SNAKE_CASE : Optional[int] = use_gumbel_for_aggregation
SCREAMING_SNAKE_CASE : Tuple = average_approximation_function
SCREAMING_SNAKE_CASE : Optional[int] = cell_selection_preference
SCREAMING_SNAKE_CASE : Optional[Any] = answer_loss_cutoff
SCREAMING_SNAKE_CASE : int = max_num_rows
SCREAMING_SNAKE_CASE : Tuple = max_num_columns
SCREAMING_SNAKE_CASE : Optional[Any] = average_logits_per_cell
SCREAMING_SNAKE_CASE : str = select_one_column
SCREAMING_SNAKE_CASE : Any = allow_empty_column_selection
SCREAMING_SNAKE_CASE : Tuple = init_cell_selection_weights_to_zero
SCREAMING_SNAKE_CASE : Tuple = reset_position_index_per_cell
SCREAMING_SNAKE_CASE : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
SCREAMING_SNAKE_CASE : str = aggregation_labels
SCREAMING_SNAKE_CASE : Any = no_aggregation_label_index
if isinstance(self.aggregation_labels , a ):
            SCREAMING_SNAKE_CASE : str = {int(a ): v for k, v in aggregation_labels.items()}
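# --- usage sketch, not part of the original file ---
# A hedged example of the fine-tuning presets this config encodes, assuming
# the public `transformers` export `TapasConfig`: WTQ-style weak supervision
# enables the aggregation head and answer supervision.
from transformers import TapasConfig
wtq_config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
print(wtq_config.num_aggregation_labels)  # 4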
from __future__ import annotations
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_a)
if n > 1:
factors.append(_a)
return factors
if __name__ == "__main__":
import doctest
    doctest.testmod()
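# Worked example of the trial-division loop above: n = 360 yields
# [2, 2, 2, 3, 3, 5], since 360 = 2**3 * 3**2 * 5; the loop stops once
# i * i > n and the final remaining prime (here 5) is appended at the end.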
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : Optional[int] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = sd_pipe(
[prompt] , generator=a , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=a , )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from math import factorial, pi
def lowerCamelCase__ ( _a , _a = 30):
if not isinstance(_a , (int, float)):
raise ValueError("maclaurin_sin() requires either an int or float for theta")
if not isinstance(_a , _a) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy")
SCREAMING_SNAKE_CASE : int = float(_a)
SCREAMING_SNAKE_CASE : Dict = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(_a))
def lowerCamelCase__ ( _a , _a = 30):
if not isinstance(_a , (int, float)):
raise ValueError("maclaurin_cos() requires either an int or float for theta")
if not isinstance(_a , _a) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy")
SCREAMING_SNAKE_CASE : str = float(_a)
SCREAMING_SNAKE_CASE : Any = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(_a))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
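# The truncated Maclaurin series evaluated above:
#   sin(theta) ~ sum_{r=0}^{accuracy-1} (-1)**r * theta**(2r + 1) / (2r + 1)!
#   cos(theta) ~ sum_{r=0}^{accuracy-1} (-1)**r * theta**(2r) / (2r)!
# theta is reduced modulo 2*pi first, which keeps the truncation error small.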
from __future__ import annotations
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Dict = [True] * limit
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Any = True
for i in range(3 , int(limit**0.5 + 1) , 2):
SCREAMING_SNAKE_CASE : List[str] = i * 2
while index < limit:
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Dict = index + i
SCREAMING_SNAKE_CASE : List[Any] = [2]
for i in range(3 , _a , 2):
if is_prime[i]:
primes.append(_a)
return primes
def lowerCamelCase__ ( _a = 1000000):
SCREAMING_SNAKE_CASE : List[Any] = prime_sieve(_a)
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : List[str] = 0
for i in range(len(_a)):
for j in range(i + length , len(_a)):
SCREAMING_SNAKE_CASE : Tuple = sum(primes[i:j])
if sol >= ceiling:
break
if sol in primes:
SCREAMING_SNAKE_CASE : Optional[Any] = j - i
SCREAMING_SNAKE_CASE : int = sol
return largest
if __name__ == "__main__":
    print(F'''{solution() = }''')
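# Worked example behind the search above (Project Euler 50): 41 = 2 + 3 + 5 +
# 7 + 11 + 13 is the longest sum of consecutive primes below one hundred that
# is itself prime; the solver scans for the longest such run under the ceiling.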
from __future__ import annotations
import math
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = size
        # allocate the tree array with the standard 4 * size upper bound
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
SCREAMING_SNAKE_CASE : Union[str, Any] = [0 for i in range(0 , 4 * size )]
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update
def __UpperCamelCase ( self : Tuple , a : int ) -> int:
"""simple docstring"""
return idx * 2
def __UpperCamelCase ( self : str , a : int ) -> int:
"""simple docstring"""
return idx * 2 + 1
def __UpperCamelCase ( self : int , a : int , a : int , a : int , a : list[int] ) -> None:
"""simple docstring"""
if left_element == right_element:
SCREAMING_SNAKE_CASE : int = a[left_element - 1]
else:
SCREAMING_SNAKE_CASE : Optional[int] = (left_element + right_element) // 2
self.build(self.left(a ) , a , a , a )
self.build(self.right(a ) , mid + 1 , a , a )
SCREAMING_SNAKE_CASE : List[Any] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : int , a : int , a : int , a : int , a : int ) -> bool:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : Any = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[str] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : List[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
SCREAMING_SNAKE_CASE : Optional[Any] = val
if left_element != right_element:
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
return True
SCREAMING_SNAKE_CASE : int = (left_element + right_element) // 2
self.update(self.left(a ) , a , a , a , a , a )
self.update(self.right(a ) , mid + 1 , a , a , a , a )
SCREAMING_SNAKE_CASE : Optional[int] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
return True
def __UpperCamelCase ( self : Dict , a : int , a : int , a : int , a : int , a : int ) -> int | float:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[Any] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
SCREAMING_SNAKE_CASE : Dict = (left_element + right_element) // 2
SCREAMING_SNAKE_CASE : Tuple = self.query(self.left(a ) , a , a , a , a )
SCREAMING_SNAKE_CASE : Tuple = self.query(self.right(a ) , mid + 1 , a , a , a )
return max(a , a )
def __str__( self : str ) -> str:
"""simple docstring"""
return str([self.query(1 , 1 , self.size , a , a ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
a_ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
a_ = 15
a_ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
    print(segt)
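# Complexity note for the lazy segment tree demonstrated above: build is O(n),
# and both range-assign updates and range-max queries run in O(log n), because
# pending writes are pushed down only along the root-to-leaf paths visited.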
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , a : Any , a : str=13 , a : Union[str, Any]=7 , a : int=True , a : Optional[Any]=True , a : Optional[int]=True , a : int=True , a : List[Any]=99 , a : List[Any]=16 , a : str=36 , a : Dict=6 , a : str=6 , a : List[str]=6 , a : Any=37 , a : Optional[Any]="gelu" , a : List[str]=0.1 , a : Tuple=0.1 , a : int=512 , a : Optional[Any]=16 , a : List[str]=2 , a : List[str]=0.02 , a : int=3 , a : int=4 , a : Union[str, Any]=None , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : int = seq_length
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE : str = use_token_type_ids
SCREAMING_SNAKE_CASE : int = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = embedding_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_hidden_groups
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = num_labels
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : str = scope
def __UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __UpperCamelCase ( self : Union[str, Any] , a : Optional[int] , a : Optional[Any] , a : int , a : Dict , a : int , a : Union[str, Any] , a : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = AlbertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(a , attention_mask=a , token_type_ids=a )
SCREAMING_SNAKE_CASE : Optional[int] = model(a , token_type_ids=a )
SCREAMING_SNAKE_CASE : Dict = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self : str , a : int , a : Optional[int] , a : Optional[Any] , a : Dict , a : str , a : int , a : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = AlbertForPreTraining(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
a , attention_mask=a , token_type_ids=a , labels=a , sentence_order_label=a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __UpperCamelCase ( self : Any , a : Any , a : Any , a : Optional[int] , a : str , a : int , a : str , a : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = AlbertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : str , a : Tuple , a : Optional[Any] , a : Union[str, Any] , a : Optional[int] , a : Optional[int] , a : List[Any] , a : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = AlbertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any] , a : Tuple , a : Tuple , a : int , a : List[Any] , a : List[Any] , a : Dict , a : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = AlbertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : int = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , a : List[str] , a : str , a : Optional[int] , a : Optional[int] , a : str , a : List[Any] , a : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = AlbertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : str , a : Optional[int] , a : Tuple , a : Optional[int] , a : Union[str, Any] , a : Union[str, Any] , a : List[Any] , a : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_choices
SCREAMING_SNAKE_CASE : Any = AlbertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
        ) : int = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
def __UpperCamelCase ( self : List[str] , a : int , a : List[str] , a : Optional[int]=False ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class in get_values(a ):
SCREAMING_SNAKE_CASE : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = AlbertModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=a , hidden_size=37 )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : Any = type
self.model_tester.create_and_check_model(*a )
@slow
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = AlbertModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = AlbertModel.from_pretrained("albert-base-v2" )
SCREAMING_SNAKE_CASE : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
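# A minimal, self-contained usage sketch of the API the integration test above
# exercises (downloads the public "albert-base-v2" weights on first call):
import torch
from transformers import AlbertModel
def albert_last_hidden_demo():
    model = AlbertModel.from_pretrained("albert-base-v2")
    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    with torch.no_grad():
        last_hidden = model(input_ids).last_hidden_state
    return last_hidden.shape  # torch.Size([1, 11, 768])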
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : Optional[int] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe([prompt] , generator=a , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = sd_pipe(
[prompt] , generator=a , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=a , )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
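# A minimal usage sketch of the pipeline tested above, assuming a CUDA device and
# network access; the k-diffusion sampler is picked by name via set_scheduler:
import torch
from diffusers import StableDiffusionKDiffusionPipeline
def k_diffusion_demo():
    pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
    pipe = pipe.to("cuda")
    pipe.set_scheduler("sample_dpmpp_2m")
    output = pipe(
        "A painting of a squirrel eating a burger",
        generator=torch.manual_seed(0),
        guidance_scale=7.5,
        num_inference_steps=15,
        output_type="np",
    )
    return output.images[0]  # (512, 512, 3) array in [0, 1]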
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Optional[Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(_a , _a)
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = emb.weight.shape
SCREAMING_SNAKE_CASE : int = nn.Linear(_a , _a , bias=_a)
SCREAMING_SNAKE_CASE : str = emb.weight.data
return lin_layer
def lowerCamelCase__ ( _a , _a="facebook/mbart-large-en-ro" , _a=False , _a=False):
SCREAMING_SNAKE_CASE : List[str] = torch.load(_a , map_location="cpu")["model"]
remove_ignore_keys_(_a)
SCREAMING_SNAKE_CASE : List[Any] = state_dict["encoder.embed_tokens.weight"].shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = MBartConfig.from_pretrained(_a , vocab_size=_a)
if mbart_aa and finetuned:
SCREAMING_SNAKE_CASE : int = "relu"
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict["decoder.embed_tokens.weight"]
SCREAMING_SNAKE_CASE : Optional[Any] = MBartForConditionalGeneration(_a)
model.model.load_state_dict(_a)
if finetuned:
SCREAMING_SNAKE_CASE : str = make_linear_from_emb(model.model.shared)
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is a mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
    model.save_pretrained(args.pytorch_dump_folder_path)
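# A small, self-contained sketch of the embedding-to-LM-head tying done by the
# emb-to-linear helper above (`make_linear_from_emb` in the upstream script): the
# head is a bias-free Linear that shares the embedding matrix. Sizes here are
# illustrative, not mBART's real ones.
import torch
from torch import nn
def tied_lm_head_demo():
    emb = nn.Embedding(num_embeddings=1000, embedding_dim=16)
    vocab_size, emb_size = emb.weight.shape
    lm_head = nn.Linear(emb_size, vocab_size, bias=False)
    lm_head.weight.data = emb.weight.data  # shared storage, not a copy
    logits = lm_head(emb(torch.tensor([[3, 7, 42]])))
    assert logits.shape == (1, 3, vocab_size)
    return logits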
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE : Union[str, Any] = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
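# A minimal usage sketch of the ONNX inpainting pipeline tested above, assuming
# onnxruntime-gpu and network access; provider and inputs mirror the tests:
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipeline
from diffusers.utils import load_image
def onnx_inpaint_demo():
    init = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/in_paint/overture-creations-5sI6fQgYIuo.png")
    mask = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
    pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CUDAExecutionProvider")
    out = pipe(prompt="A red cat sitting on a park bench", image=init, mask_image=mask,
               num_inference_steps=10, generator=np.random.RandomState(0), output_type="np")
    return out.images[0]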
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
a_ = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
a_ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def lowerCamelCase__ ( _a , _a=False):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = create_model(
"HTSAT-tiny" , "roberta" , _a , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=_a , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : Optional[Any] = r".*sequential.(\d+).*"
SCREAMING_SNAKE_CASE : int = r".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
SCREAMING_SNAKE_CASE : Any = key.replace(_a , _a)
if re.match(_a , _a):
# replace sequential layers with list
SCREAMING_SNAKE_CASE : Optional[int] = re.match(_a , _a).group(1)
            SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(sequential_layer)//3}.linear.")
elif re.match(_a , _a):
SCREAMING_SNAKE_CASE : str = int(re.match(_a , _a).group(1))
# Because in CLAP they use `nn.Sequential`...
SCREAMING_SNAKE_CASE : List[str] = 1 if projecton_layer == 0 else 2
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(f"_projection.{projecton_layer}." , f"_projection.linear{transformers_projection_layer}.")
if "audio" and "qkv" in key:
# split qkv into query key and value
SCREAMING_SNAKE_CASE : Optional[Any] = value
SCREAMING_SNAKE_CASE : Dict = mixed_qkv.size(0) // 3
SCREAMING_SNAKE_CASE : Dict = mixed_qkv[:qkv_dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = mixed_qkv[qkv_dim : qkv_dim * 2]
SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim * 2 :]
SCREAMING_SNAKE_CASE : Dict = query_layer
SCREAMING_SNAKE_CASE : str = key_layer
SCREAMING_SNAKE_CASE : List[str] = value_layer
else:
SCREAMING_SNAKE_CASE : Any = value
return model_state_dict
def lowerCamelCase__ ( _a , _a , _a , _a=False):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = init_clap(_a , enable_fusion=_a)
clap_model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = clap_model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(_a)
SCREAMING_SNAKE_CASE : Any = ClapConfig()
SCREAMING_SNAKE_CASE : Tuple = enable_fusion
SCREAMING_SNAKE_CASE : str = ClapModel(_a)
# ignore the spectrogram embedding layer
model.load_state_dict(_a , strict=_a)
model.save_pretrained(_a)
transformers_config.save_pretrained(_a)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
a_ = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
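# A small sketch of the fused-qkv split performed during the state-dict
# conversion above, assuming query, key and value are stacked along dim 0:
import torch
def split_fused_qkv_demo(hidden=8):
    fused = torch.randn(3 * hidden, hidden)  # stand-in for a "*.qkv.weight" tensor
    qkv_dim = fused.size(0) // 3
    query_w = fused[:qkv_dim]
    key_w = fused[qkv_dim : qkv_dim * 2]
    value_w = fused[qkv_dim * 2 :]
    assert query_w.shape == key_w.shape == value_w.shape == (hidden, hidden)
    return query_w, key_w, value_w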
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase__ ( _a):
return getitem, k
def lowerCamelCase__ ( _a , _a):
return setitem, k, v
def lowerCamelCase__ ( _a):
return delitem, k
def lowerCamelCase__ ( _a , _a , *_a):
try:
return fun(_a , *_a), None
except Exception as e:
return None, e
a_ = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
a_ = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
a_ = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
a_ = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Dict = HashMap(initial_block_size=4)
SCREAMING_SNAKE_CASE : List[str] = {}
for _, (fun, *args) in enumerate(_a):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = _run_operation(_a , _a , *_a)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = _run_operation(_a , _a , *_a)
assert my_res == py_res
assert str(_a) == str(_a)
assert set(_a) == set(_a)
assert len(_a) == len(_a)
assert set(my.items()) == set(py.items())
def lowerCamelCase__ ( ):
def is_public(_a) -> bool:
return not name.startswith("_")
SCREAMING_SNAKE_CASE : List[str] = {name for name in dir({}) if is_public(_a)}
SCREAMING_SNAKE_CASE : Union[str, Any] = {name for name in dir(HashMap()) if is_public(_a)}
    assert dict_public_names > hash_public_names
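# A compact, self-contained restatement of the differential-testing idea above:
# drive two mappings with the same operations and require identical outcomes
# (`dict` stands in for both sides so this sketch runs without the module).
from operator import delitem, getitem, setitem
def _run_op_sketch(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as exc:
        return None, type(exc)
def differential_check(make_map, operations):
    mine, ref = make_map(), {}
    for fun, *args in operations:
        assert _run_op_sketch(mine, fun, *args) == _run_op_sketch(ref, fun, *args)
    assert sorted(mine.items()) == sorted(ref.items())
differential_check(dict, [(setitem, "k", 1), (getitem, "k"), (delitem, "k"), (getitem, "k")])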
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1):
for perpendicular in range(_a , max_perimeter + 1):
SCREAMING_SNAKE_CASE : List[Any] = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(_a):
SCREAMING_SNAKE_CASE : int = int(base + perpendicular + hypotenuse)
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase__ ( _a = 1000):
SCREAMING_SNAKE_CASE : List[str] = pythagorean_triple(_a)
return triplets.most_common(1)[0][0]
if __name__ == "__main__":
    print(F'''Perimeter {solution()} has maximum solutions''')
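# A brute-force cross-check of the counting above, assuming integer sides with
# a <= b and hypotenuse c = p - a - b; p = 120 is the classic three-solution case.
def count_triplets_for(perimeter):
    count = 0
    for side_a in range(1, perimeter // 3):
        for side_b in range(side_a, perimeter // 2):
            side_c = perimeter - side_a - side_b
            if side_a ** 2 + side_b ** 2 == side_c ** 2:
                count += 1
    return count
assert count_triplets_for(120) == 3  # (20, 48, 52), (24, 45, 51), (30, 40, 50)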
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ = 16
a_ = 32
def lowerCamelCase__ ( _a , _a = 16 , _a = "bert-base-cased"):
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(_a)
SCREAMING_SNAKE_CASE : str = load_dataset("glue" , "mrpc")
def tokenize_function(_a):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_a , max_length=_a)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE : Optional[int] = datasets.map(
_a , batched=_a , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=_a)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenized_datasets.rename_column("label" , "labels")
def collate_fn(_a):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_a , padding="max_length" , max_length=128 , return_tensors="pt")
return tokenizer.pad(_a , padding="longest" , return_tensors="pt")
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Any = DataLoader(
tokenized_datasets["train"] , shuffle=_a , collate_fn=_a , batch_size=_a)
SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=_a , collate_fn=_a , batch_size=_a)
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( _a , _a):
# Initialize accelerator
SCREAMING_SNAKE_CASE : Dict = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : int = config["lr"]
SCREAMING_SNAKE_CASE : Any = int(config["num_epochs"])
SCREAMING_SNAKE_CASE : Tuple = int(config["seed"])
SCREAMING_SNAKE_CASE : Tuple = int(config["batch_size"])
SCREAMING_SNAKE_CASE : Dict = args.model_name_or_path
set_seed(_a)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = get_dataloaders(_a , _a , _a)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained(_a , return_dict=_a)
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Optional[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE : Dict = optimizer_cls(params=model.parameters() , lr=_a)
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[Any] = (len(_a) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_a , num_warmup_steps=0 , num_training_steps=_a , )
else:
SCREAMING_SNAKE_CASE : Dict = DummyScheduler(_a , total_num_steps=_a , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(
_a , _a , _a , _a , _a)
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE : Dict = 0
    # We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE : Any = 0
# Now we train the model
SCREAMING_SNAKE_CASE : Optional[int] = evaluate.load("glue" , "mrpc")
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : str = {}
for epoch in range(_a , _a):
model.train()
for step, batch in enumerate(_a):
SCREAMING_SNAKE_CASE : Dict = model(**_a)
SCREAMING_SNAKE_CASE : int = outputs.loss
SCREAMING_SNAKE_CASE : Tuple = loss / gradient_accumulation_steps
accelerator.backward(_a)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for step, batch in enumerate(_a):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**_a)
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = accelerator.gather(
(predictions, batch["labels"])) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_a) - 1:
SCREAMING_SNAKE_CASE : str = predictions[: len(eval_dataloader.dataset) - samples_seen]
SCREAMING_SNAKE_CASE : Tuple = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_a , references=_a , )
SCREAMING_SNAKE_CASE : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , _a)
SCREAMING_SNAKE_CASE : Tuple = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
SCREAMING_SNAKE_CASE : str = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json") , "w") as f:
json.dump(_a , _a)
def lowerCamelCase__ ( ):
    SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser(description="Simple example of a training script with a performance lower-bound check.")
parser.add_argument(
"--model_name_or_path" , type=_a , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_a , )
parser.add_argument(
"--output_dir" , type=_a , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=_a , default=_a , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=_a , default=3 , help="Number of train epochs." , )
SCREAMING_SNAKE_CASE : str = parser.parse_args()
SCREAMING_SNAKE_CASE : List[Any] = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(_a , _a)
if __name__ == "__main__":
    main()
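# A stripped-down sketch of the manual gradient-accumulation loop used above:
# scale each micro-batch loss by the accumulation factor and only step/zero the
# optimizer once per window (pure PyTorch; no Accelerate or DeepSpeed needed).
import torch
from torch import nn
def grad_accumulation_demo(accumulation_steps=4, micro_batches=8):
    model = nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    for step in range(micro_batches):
        x, y = torch.randn(2, 4), torch.randn(2, 2)
        loss = nn.functional.mse_loss(model(x), y) / accumulation_steps
        loss.backward()  # gradients add up across micro-batches
        if (step + 1) % accumulation_steps == 0:  # the script above tests step % n == 0
            optimizer.step()
            optimizer.zero_grad()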
from __future__ import annotations
def lowerCamelCase__ ( _a):
if len(_a) == 0:
return []
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = min(_a), max(_a)
SCREAMING_SNAKE_CASE : Dict = int(max_value - min_value) + 1
SCREAMING_SNAKE_CASE : list[list] = [[] for _ in range(_a)]
for i in my_list:
buckets[int(i - min_value)].append(_a)
return [v for bucket in buckets for v in sorted(_a)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
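# A compact restatement of the algorithm above for reference: one bucket per
# integer offset from the minimum, then flatten the sorted buckets in order.
def bucket_sort_sketch(values):
    if not values:
        return []
    lo, hi = min(values), max(values)
    buckets = [[] for _ in range(int(hi - lo) + 1)]
    for v in values:
        buckets[int(v - lo)].append(v)
    return [v for bucket in buckets for v in sorted(bucket)]
assert bucket_sort_sketch([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort_sketch([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]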
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : int = len(_a)
SCREAMING_SNAKE_CASE : Matrix = [[0 for _ in range(size + 1)] for _ in range(_a)]
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : float
for row in range(_a):
for col in range(_a):
SCREAMING_SNAKE_CASE : List[Any] = matrix[row][col]
SCREAMING_SNAKE_CASE : int = vector[row][0]
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : List[str] = 0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE : List[str] = max((abs(augmented[rowa][col]), rowa) for rowa in range(_a , _a))[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _a):
SCREAMING_SNAKE_CASE : Dict = augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE : int = 0
for cola in range(col + 1 , size + 1):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _a):
for row in range(_a):
SCREAMING_SNAKE_CASE : List[Any] = augmented[row][col] / augmented[col][col]
for cola in range(_a , size + 1):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10)] for row in range(_a)
]
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : int = len(_a)
SCREAMING_SNAKE_CASE : Matrix = [[0 for _ in range(_a)] for _ in range(_a)]
SCREAMING_SNAKE_CASE : Matrix = [[0] for _ in range(_a)]
SCREAMING_SNAKE_CASE : Matrix
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
for x_val, y_val in enumerate(_a):
for col in range(_a):
SCREAMING_SNAKE_CASE : Any = (x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE : Tuple = y_val
SCREAMING_SNAKE_CASE : Any = solve(_a , _a)
def interpolated_func(_a) -> int:
return sum(
round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
for x_val in range(_a))
return interpolated_func
def lowerCamelCase__ ( _a):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _a = question_function , _a = 10):
SCREAMING_SNAKE_CASE : list[int] = [func(_a) for x_val in range(1 , order + 1)]
SCREAMING_SNAKE_CASE : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff]) for max_coeff in range(1 , order + 1)
]
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Callable[[int], int]
SCREAMING_SNAKE_CASE : int
for poly in polynomials:
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
while func(_a) == poly(_a):
x_val += 1
ret += poly(_a)
return ret
if __name__ == "__main__":
    print(F'''{solution() = }''')
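# A hand-checked miniature of the "first incorrect term" idea implemented above,
# using u(n) = n**3 (the Project Euler 101 statement's example): the degree-1
# polynomial through n = 1, 2 is 7n - 6, and its value at n = 3 is the first
# place it disagrees with u.
def u_cubic(n):
    return n ** 3
def op_degree_one(n):
    return 7 * n - 6  # line through (1, u(1)) and (2, u(2))
assert op_degree_one(1) == u_cubic(1) and op_degree_one(2) == u_cubic(2)
assert op_degree_one(3) == 15 and u_cubic(3) == 27  # first incorrect term is 15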
a_ = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset([])
a_ = frozenset(['image'])
a_ = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image'])
a_ = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'negative_prompt'])
a_ = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
a_ = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image', 'mask_image'])
a_ = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['example_image', 'image', 'mask_image'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset(['input_tokens'])
a_ = frozenset(['input_tokens'])
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a_ = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
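# A minimal sketch of the lazy-import pattern behind `_LazyModule`, assuming only
# the standard library: attribute lookups trigger the real submodule import the
# first time a symbol is requested.
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
    def __getattr__(self, attr):
        try:
            submodule = self._class_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        module = importlib.import_module("." + submodule, self.__name__)
        return getattr(module, attr)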
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a_ = get_logger()
a_ = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
def __init__( self : Optional[Any] , a : str=None , a : List[Any]=None , **a : Any ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=a )
import jax
from jaxlib.xla_client import Device
        if isinstance(device , Device ):
raise ValueError(
F"Expected {device} to be a `str` not {type(a )}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
        SCREAMING_SNAKE_CASE : List[str] = device if isinstance(device , str ) else str(jax.devices()[0] )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or
        # `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : str = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
SCREAMING_SNAKE_CASE : Any = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE : Any = jnp_array_kwargs
@staticmethod
def __UpperCamelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(a ): device for device in jax.devices()}
def __UpperCamelCase ( self : Dict , a : Tuple ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
return column
def __UpperCamelCase ( self : Dict , a : str ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
        if isinstance(a , (str, bytes, type(None)) ):
return value
elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                SCREAMING_SNAKE_CASE : Dict = {"dtype": jnp.int64}
            else:
                SCREAMING_SNAKE_CASE : str = {"dtype": jnp.int32}
elif isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
SCREAMING_SNAKE_CASE : int = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Dict = np.asarray(a )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or
        # `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a , **{**default_dtype, **self.jnp_array_kwargs} )
def __UpperCamelCase ( self : Any , a : List[str] ) -> Dict:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a , "__array__" ) and not isinstance(a , jax.Array ):
SCREAMING_SNAKE_CASE : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
elif isinstance(a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
return self._tensorize(a )
def __UpperCamelCase ( self : Optional[Any] , a : dict ) -> Dict:
"""simple docstring"""
return map_nested(self._recursive_tensorize , a , map_list=a )
def __UpperCamelCase ( self : Dict , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_row(a )
SCREAMING_SNAKE_CASE : List[Any] = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def __UpperCamelCase ( self : Optional[int] , a : pa.Table ) -> "jax.Array":
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_arrow_extractor().extract_column(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE : Tuple = self.recursive_tensorize(a )
SCREAMING_SNAKE_CASE : Optional[int] = self._consolidate(a )
return column
def __UpperCamelCase ( self : List[Any] , a : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_batch(a )
SCREAMING_SNAKE_CASE : str = self.python_features_decoder.decode_batch(a )
SCREAMING_SNAKE_CASE : List[Any] = self.recursive_tensorize(a )
for column_name in batch:
SCREAMING_SNAKE_CASE : List[Any] = self._consolidate(batch[column_name] )
        return batch
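# A small sketch of the device-scoped conversion done in `_tensorize` above,
# assuming a recent JAX install: inside jax.default_device, jnp.array copies the
# NumPy buffer onto the selected device.
import jax
import jax.numpy as jnp
import numpy as np
def jnp_on_device_demo():
    device = jax.devices()[0]
    with jax.default_device(device):
        arr = jnp.array(np.arange(6).reshape(2, 3), dtype=jnp.int32)
    return arr.dtype, arr.shape  # (dtype('int32'), (2, 3))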
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *a : str , **a : int ) -> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] , a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
import datasets
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : Dict = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE : Tuple = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
"score": ANY(a ),
"label": ANY(a ),
"box": {"xmin": ANY(a ), "ymin": ANY(a ), "xmax": ANY(a ), "ymax": ANY(a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@require_torch
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE : Dict = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(a )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(a )
SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : Tuple = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 0.9985
SCREAMING_SNAKE_CASE : int = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE : List[str] = pipeline("object-detection" , model=a )
SCREAMING_SNAKE_CASE : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE : Dict = 0.9993
SCREAMING_SNAKE_CASE : str = pipeline("object-detection" , model=a , threshold=a )
SCREAMING_SNAKE_CASE : List[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ] , )
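# A minimal usage sketch of the API exercised above (downloads weights; see the
# pinned scores in the slow tests for the expected kind of output):
from transformers import pipeline
def object_detection_demo():
    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
    for det in detections:
        print(det["label"], round(det["score"], 4), det["box"])
    return detections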
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : list[list[str]] = [[] for _ in range(_a)]
SCREAMING_SNAKE_CASE : Optional[Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative")
if key == 1 or len(_a) <= key:
return input_string
for position, character in enumerate(_a):
SCREAMING_SNAKE_CASE : Tuple = position % (lowest * 2) # puts it in bounds
SCREAMING_SNAKE_CASE : str = min(_a , lowest * 2 - num) # creates zigzag pattern
temp_grid[num].append(_a)
SCREAMING_SNAKE_CASE : Optional[int] = ["".join(_a) for row in temp_grid]
SCREAMING_SNAKE_CASE : Union[str, Any] = "".join(_a)
return output_string
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : int = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative")
if key == 1:
return input_string
SCREAMING_SNAKE_CASE : list[list[str]] = [[] for _ in range(_a)] # generates template
for position in range(len(_a)):
SCREAMING_SNAKE_CASE : Dict = position % (lowest * 2) # puts it in bounds
SCREAMING_SNAKE_CASE : int = min(_a , lowest * 2 - num) # creates zigzag pattern
temp_grid[num].append("*")
SCREAMING_SNAKE_CASE : Any = 0
for row in temp_grid: # fills in the characters
SCREAMING_SNAKE_CASE : Tuple = input_string[counter : counter + len(_a)]
grid.append(list(_a))
counter += len(_a)
SCREAMING_SNAKE_CASE : Optional[int] = "" # reads as zigzag
for position in range(len(_a)):
SCREAMING_SNAKE_CASE : int = position % (lowest * 2) # puts it in bounds
SCREAMING_SNAKE_CASE : Union[str, Any] = min(_a , lowest * 2 - num) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0)
return output_string
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : List[str] = {}
for key_guess in range(1 , len(_a)): # tries every key
SCREAMING_SNAKE_CASE : Tuple = decrypt(_a , _a)
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
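Since the three functions in the cell above all carry the obfuscated name lowerCamelCase__, here is a readable sketch of the encryption half of the same rail-fence (zigzag) cipher; the function and variable names are the editor's, not the dataset's.

# Readable sketch of the rail-fence (zigzag) cipher implemented above.
def rail_fence_encrypt(plaintext: str, key: int) -> str:
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(plaintext) <= key:
        return plaintext
    lowest = key - 1
    rows: list[list[str]] = [[] for _ in range(key)]
    for position, character in enumerate(plaintext):
        num = position % (lowest * 2)      # put the position in cycle bounds
        num = min(num, lowest * 2 - num)   # fold back to create the zigzag
        rows[num].append(character)
    return "".join("".join(row) for row in rows)

print(rail_fence_encrypt("WEAREDISCOVEREDFLEEATONCE", 3))  # WECRLTEERDSOEEFEAOCAIVDEN

The min(num, lowest * 2 - num) fold maps the cycle position onto a row index directly, which is what produces the zigzag without tracking an explicit up/down direction flag.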
def lowerCamelCase__ ( _a):
if not isinstance(_a , _a):
SCREAMING_SNAKE_CASE : Tuple = f"Input value of [number={number}] must be an integer"
raise TypeError(_a)
if number < 0:
return False
SCREAMING_SNAKE_CASE : Union[str, Any] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
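A readable sketch of the check in the cell above, which strips one trailing digit at a time from the number and from its square; 76 passes because 76² = 5776 ends in 76. Names here are the editor's.

# Readable sketch of the automorphic-number check above.
def is_automorphic(number: int) -> bool:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        return False
    number_square = number * number
    while number > 0:  # compare trailing digits one at a time
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True

assert is_automorphic(76)     # 76**2 == 5776, which ends in 76
assert not is_automorphic(7)  # 7**2 == 49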
from __future__ import annotations
from cmath import sqrt
def lowerCamelCase__ ( _a , _a , _a):
if a == 0:
raise ValueError("Coefficient 'a' must not be zero.")
SCREAMING_SNAKE_CASE : Tuple = b * b - 4 * a * c
SCREAMING_SNAKE_CASE : Dict = (-b + sqrt(_a)) / (2 * a)
SCREAMING_SNAKE_CASE : List[Any] = (-b - sqrt(_a)) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = quadratic_roots(a=5 , b=6 , c=1)
print(f"The solutions are: {solutiona} and {solutiona}")
if __name__ == "__main__":
main()
| 25 |
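For the demo call above (a=5, b=6, c=1) the discriminant is 6² − 4·5·1 = 16, giving roots (−6 ± 4)/10, i.e. −0.2 and −1.0. A readable sketch of the helper follows; names are the editor's.

# Readable sketch of the quadratic-roots helper above.
from __future__ import annotations
from cmath import sqrt

def quadratic_roots(a: float, b: float, c: float) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    # return plain floats when the imaginary part vanishes
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )

print(quadratic_roots(a=5, b=6, c=1))  # (-0.2, -1.0)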
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 25 | 1 |
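The integration test closing the cell above corresponds to this minimal inference sketch; the model id, inputs, and expected output shape come from the test, while variable names are the editor's.

# Minimal inference sketch matching the integration test above.
import torch
from transformers import DistilBertModel

model = DistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
    last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden_state.shape)  # torch.Size([1, 11, 768])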
from collections.abc import Iterable
from typing import Any
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , a : int | None = None ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = value
SCREAMING_SNAKE_CASE : Node | None = None # Added in order to delete a node easier
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
def __repr__( self : List[Any] ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"{self.value}": (self.left, self.right)} , indent=1 )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , a : Node | None = None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = root
def __str__( self : Tuple ) -> str:
"""simple docstring"""
return str(self.root )
def __UpperCamelCase ( self : Dict , a : Node , a : Node | None ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
SCREAMING_SNAKE_CASE : List[str] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(a ): # If it is the right children
SCREAMING_SNAKE_CASE : Optional[Any] = new_children
else:
SCREAMING_SNAKE_CASE : int = new_children
else:
SCREAMING_SNAKE_CASE : Optional[Any] = new_children
def __UpperCamelCase ( self : Optional[Any] , a : Node ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __UpperCamelCase ( self : Any ) -> bool:
"""simple docstring"""
return self.root is None
def __UpperCamelCase ( self : str , a : Union[str, Any] ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = Node(a ) # create a new Node
if self.empty(): # if Tree is empty
SCREAMING_SNAKE_CASE : Union[str, Any] = new_node # set its root
else: # Tree is not empty
SCREAMING_SNAKE_CASE : Any = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
SCREAMING_SNAKE_CASE : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
SCREAMING_SNAKE_CASE : str = parent_node.left
else:
if parent_node.right is None:
SCREAMING_SNAKE_CASE : int = new_node
break
else:
SCREAMING_SNAKE_CASE : Any = parent_node.right
SCREAMING_SNAKE_CASE : Optional[Any] = parent_node
def __UpperCamelCase ( self : int , *a : Optional[Any] ) -> None:
"""simple docstring"""
for value in values:
self.__insert(a )
def __UpperCamelCase ( self : Tuple , a : Union[str, Any] ) -> Node | None:
"""simple docstring"""
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
SCREAMING_SNAKE_CASE : List[Any] = node.left if value < node.value else node.right
return node
def __UpperCamelCase ( self : str , a : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
SCREAMING_SNAKE_CASE : List[str] = self.root
if not self.empty():
while node.right is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = node.right
return node
def __UpperCamelCase ( self : Any , a : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
SCREAMING_SNAKE_CASE : Any = self.root
if self.root is None:
return None
if not self.empty():
SCREAMING_SNAKE_CASE : List[Any] = self.root
while node.left is not None:
SCREAMING_SNAKE_CASE : List[str] = node.left
return node
def __UpperCamelCase ( self : Union[str, Any] , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.search(a ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(a , a )
elif node.left is None: # Has only right children
self.__reassign_nodes(a , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(a , node.left )
else:
SCREAMING_SNAKE_CASE : Any = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
SCREAMING_SNAKE_CASE : Tuple = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __UpperCamelCase ( self : Union[str, Any] , a : Node | None ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __UpperCamelCase ( self : Tuple , a : int=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __UpperCamelCase ( self : Tuple , a : list , a : Node | None ) -> None:
"""simple docstring"""
if node:
self.inorder(a , node.left )
arr.append(node.value )
self.inorder(a , node.right )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Node ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[int] = []
self.inorder(a , a ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if curr_node is not None:
SCREAMING_SNAKE_CASE : List[Any] = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
return node_list
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
SCREAMING_SNAKE_CASE : str = BinarySearchTree()
for i in testlist:
t.insert(_a)
# Prints all the elements of the list in order traversal
print(_a)
if t.search(6) is not None:
print("The value 6 exists")
else:
print("The value 6 doesn't exist")
if t.search(-1) is not None:
print("The value -1 exists")
else:
print("The value -1 doesn't exist")
if not t.empty():
print("Max Value: " , t.get_max().value) # type: ignore
print("Min Value: " , t.get_min().value) # type: ignore
for i in testlist:
t.remove(_a)
print(_a)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 25 |
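The k-th-smallest helper near the end of the cell above works because an inorder traversal of a binary search tree visits values in sorted order. Below is a self-contained sketch of that idea, with all names chosen by the editor rather than taken from the obfuscated cell.

# Self-contained sketch of the inorder-based k-th smallest lookup above.
class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

def insert(root, value):
    if root is None:
        return Node(value)
    if value < root.value:
        root.left = insert(root.left, value)
    else:
        root.right = insert(root.right, value)
    return root

def inorder(node, out):
    if node is not None:  # inorder traversal of a BST yields sorted values
        inorder(node.left, out)
        out.append(node.value)
        inorder(node.right, out)

root = None
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):  # same test values as the demo above
    root = insert(root, v)
values = []
inorder(root, values)
print(values)         # [1, 3, 4, 6, 7, 8, 10, 13, 14]
print(values[3 - 1])  # k = 3 -> third smallest is 4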
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 25 | 1 |
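The TYPE_CHECKING/_LazyModule pattern in the cell above defers the heavy submodule imports until an attribute is first accessed. Below is a minimal standard-library sketch of the same idea; the class and names are the editor's, not the transformers implementation.

# Minimal sketch of the lazy-module idea behind _LazyModule.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute to the module that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

# Usage sketch: nothing under `json` is imported until first attribute access.
lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # `import json` happens here, on first use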