code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowercase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config ,"""steps_offset""" ) and scheduler.config.steps_offset != 1:
snake_case : Any = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" ,"""1.0.0""" ,SCREAMING_SNAKE_CASE_ ,standard_warn=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = dict(scheduler.config )
snake_case : str = 1
snake_case : int = FrozenDict(SCREAMING_SNAKE_CASE_ )
if hasattr(scheduler.config ,"""skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
snake_case : int = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" ,"""1.0.0""" ,SCREAMING_SNAKE_CASE_ ,standard_warn=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = dict(scheduler.config )
snake_case : Optional[int] = True
snake_case : List[Any] = FrozenDict(SCREAMING_SNAKE_CASE_ )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE_ ,segmentation_processor=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,unet=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,safety_checker=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
snake_case : str = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case_ ( self ):
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.unet ,"""_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE_ ,"""_hf_hook""" )
and hasattr(module._hf_hook ,"""execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 512 ,SCREAMING_SNAKE_CASE_ = 512 ,SCREAMING_SNAKE_CASE_ = 50 ,SCREAMING_SNAKE_CASE_ = 7.5 ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = 1 ,SCREAMING_SNAKE_CASE_ = 0.0 ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = "pil" ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = 1 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Tuple = self.segmentation_processor(
text=[text] ,images=[image] ,padding="""max_length""" ,return_tensors="""pt""" ).to(self.device )
snake_case : Union[str, Any] = self.segmentation_model(**SCREAMING_SNAKE_CASE_ )
snake_case : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
snake_case : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
snake_case : Union[str, Any] = StableDiffusionInpaintPipeline(
vae=self.vae ,text_encoder=self.text_encoder ,tokenizer=self.tokenizer ,unet=self.unet ,scheduler=self.scheduler ,safety_checker=self.safety_checker ,feature_extractor=self.feature_extractor ,)
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE_ ,image=SCREAMING_SNAKE_CASE_ ,mask_image=SCREAMING_SNAKE_CASE_ ,height=SCREAMING_SNAKE_CASE_ ,width=SCREAMING_SNAKE_CASE_ ,num_inference_steps=SCREAMING_SNAKE_CASE_ ,guidance_scale=SCREAMING_SNAKE_CASE_ ,negative_prompt=SCREAMING_SNAKE_CASE_ ,num_images_per_prompt=SCREAMING_SNAKE_CASE_ ,eta=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,latents=SCREAMING_SNAKE_CASE_ ,output_type=SCREAMING_SNAKE_CASE_ ,return_dict=SCREAMING_SNAKE_CASE_ ,callback=SCREAMING_SNAKE_CASE_ ,callback_steps=SCREAMING_SNAKE_CASE_ ,)
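Taken together, the class above chains CLIPSeg segmentation with Stable Diffusion inpainting: a text query is turned into a mask, and the mask drives the inpainting call. A minimal usage sketch, assuming this class is published as the diffusers community pipeline `text_inpainting` and that the checkpoint names below are available (both are assumptions):

```python
import requests
import torch
from PIL import Image
from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

# CLIPSeg turns the text query into a segmentation mask.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",  # assumed community-pipeline id
    segmentation_model=model,
    segmentation_processor=processor,
    torch_dtype=torch.float16,
).to("cuda")

url = "https://github.com/timojl/clipseg/blob/master/example_image.jpg?raw=true"
image = Image.open(requests.get(url, stream=True).raw).resize((512, 512))

# `text` selects the region to replace; `prompt` says what to paint there.
result = pipe(text="a glass", prompt="a cup of coffee", image=image).images[0]
```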
| 36 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def lowercase ( __A : str , __A : str , **__A : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = AutoConfig.from_pretrained(__A , **__A )
snake_case : Tuple = AutoModelForSeq2SeqLM.from_config(__A )
model.save_pretrained(__A )
AutoTokenizer.from_pretrained(__A ).save_pretrained(__A )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
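Despite the obfuscated definition name, `fire.Fire(save_randomly_initialized_version)` shows the intended entry point: given a config name and a save directory, the function writes out a randomly initialized seq2seq model plus its tokenizer. A sketch of a direct call (path and override values are illustrative):

```python
# Build a tiny, randomly initialized t5-small-shaped model for fast tests.
# Extra kwargs are forwarded to AutoConfig.from_pretrained as config overrides.
model = save_randomly_initialized_version("t5-small", "/tmp/rand-t5-small", d_ff=256)
```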
| 36 | 1 |
def lowercase ( __A : Tuple , __A : Optional[int] ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = [1]
for i in range(2 , __A ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
snake_case : List[str] = []
snake_case : Optional[Any] = list(range(__A ) )
# Find permutation
while factorials:
snake_case : str = factorials.pop()
snake_case , snake_case : str = divmod(__A , __A )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
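The algorithm decodes `k` in the factorial number system to produce the k-th lexicographic permutation of `range(n)`. Because the obfuscation collapsed the `divmod` variables above into a single name, here is a self-contained reconstruction under the assumed `(k, n)` argument order, checked against `itertools.permutations`:

```python
from itertools import permutations

def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    factorials = [1]  # 1!, 2!, ..., (n-1)!
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    while factorials:
        factorial = factorials.pop()
        # The quotient picks the next element; the remainder is decoded next round.
        number, k = divmod(k, factorial)
        permutation.append(elements.pop(number))
    permutation.append(elements[0])
    return permutation

assert tuple(kth_permutation(10, 4)) == list(permutations(range(4)))[10]  # (1, 3, 0, 2)
```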
| 36 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = '''mobilenet_v1'''
def __init__( self ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=224 ,SCREAMING_SNAKE_CASE_=1.0 ,SCREAMING_SNAKE_CASE_=8 ,SCREAMING_SNAKE_CASE_="relu6" ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=0.9_99 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=0.0_01 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
snake_case : List[Any] = num_channels
snake_case : str = image_size
snake_case : List[Any] = depth_multiplier
snake_case : Optional[int] = min_depth
snake_case : Union[str, Any] = hidden_act
snake_case : int = tf_padding
snake_case : Optional[int] = classifier_dropout_prob
snake_case : Tuple = initializer_range
snake_case : List[str] = layer_norm_eps
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
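Modulo the obfuscation, these appear to be the MobileNetV1 model config and its ONNX export config from transformers. A quick instantiation sketch (the de-obfuscated class name `MobileNetV1Config` is an assumption):

```python
from transformers import MobileNetV1Config

# Mirrors google/mobilenet_v1_0.75_192: width multiplier 0.75 at 192x192 input.
config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
assert config.model_type == "mobilenet_v1"
```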
| 36 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowercase : Optional[int] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = ['''pixel_values''']
def __init__( self ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = 1 / 255 ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : Any = size if size is not None else {"""shortest_edge""": 224}
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : Tuple = do_resize
snake_case : int = size
snake_case : int = resample
snake_case : int = do_center_crop
snake_case : Any = crop_size
snake_case : Optional[int] = do_rescale
snake_case : Dict = rescale_factor
snake_case : Dict = do_normalize
snake_case : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case : Any = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case : Any = do_convert_rgb
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case : Any = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ ,size=size["""shortest_edge"""] ,default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_ ,size=(size["""height"""], size["""width"""]) ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ ,mean=SCREAMING_SNAKE_CASE_ ,std=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Any = do_resize if do_resize is not None else self.do_resize
snake_case : Optional[int] = size if size is not None else self.size
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""size""" ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : int = resample if resample is not None else self.resample
snake_case : int = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case : Optional[Any] = image_std if image_std is not None else self.image_std
snake_case : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case : Dict = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case : List[str] = [convert_to_rgb(SCREAMING_SNAKE_CASE_ ) for image in images]
# All transformations expect numpy arrays.
snake_case : int = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
snake_case : str = [self.resize(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
snake_case : Optional[int] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
snake_case : int = [self.rescale(image=SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=SCREAMING_SNAKE_CASE_ ,mean=SCREAMING_SNAKE_CASE_ ,std=SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ ,tensor_type=SCREAMING_SNAKE_CASE_ )
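The preprocessing chain is resize (shortest edge 224), center crop (224x224), rescale (1/255), then normalize with the OpenAI CLIP mean/std. A usage sketch, assuming this corresponds to transformers' `CLIPImageProcessor`:

```python
import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults match the constructor above
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # any HWC uint8 image
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```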
| 36 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''decision_transformer'''
__lowerCamelCase : Optional[Any] = ['''past_key_values''']
__lowerCamelCase : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,SCREAMING_SNAKE_CASE_=17 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=1024 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_="relu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=1E-5 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Any = state_dim
snake_case : Optional[Any] = act_dim
snake_case : Union[str, Any] = hidden_size
snake_case : Any = max_ep_len
snake_case : int = action_tanh
snake_case : Any = vocab_size
snake_case : Any = n_positions
snake_case : List[str] = n_layer
snake_case : int = n_head
snake_case : Optional[int] = n_inner
snake_case : List[Any] = activation_function
snake_case : Tuple = resid_pdrop
snake_case : Optional[Any] = embd_pdrop
snake_case : Dict = attn_pdrop
snake_case : List[str] = layer_norm_epsilon
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[Any] = scale_attn_weights
snake_case : str = use_cache
snake_case : int = scale_attn_by_inverse_layer_idx
snake_case : Tuple = reorder_and_upcast_attn
snake_case : Tuple = bos_token_id
snake_case : List[str] = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
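The defaults (17-dim states, 4-dim actions) match the hopper checkpoint referenced in the URL map above. A construction sketch, assuming the de-obfuscated class names are `DecisionTransformerConfig` and `DecisionTransformerModel`:

```python
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=4)  # the defaults above
model = DecisionTransformerModel(config)  # randomly initialized
```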
| 36 | 1 |
def lowercase ( __A : list[int] ) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
snake_case : Optional[int] = sum(__A ) / len(__A ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
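The function computes the mean absolute deviation: the average absolute distance of each value from the mean. A worked check, reconstructed with readable names since the original name is obfuscated:

```python
def mean_absolute_deviation(nums: list[float]) -> float:
    if not nums:  # make sure the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)
    return sum(abs(x - average) for x in nums) / len(nums)

# Mean of [1, 2, 3, 4] is 2.5; deviations 1.5, 0.5, 0.5, 1.5 average to 1.0.
assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0
```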
| 36 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
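In practice, the point of this lazy-import module is that any of these schedulers can be swapped into a pipeline from the same config. A sketch of the usual pattern (the checkpoint name is illustrative):

```python
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Rebuild a different scheduler from the existing scheduler's config.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
```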
| 36 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : List[str] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = -1
snake_case : Union[str, Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
snake_case : Any = TextStreamer(SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ ,streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case : Any = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = -1
snake_case : Optional[int] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ )
snake_case : Any = tokenizer.decode(greedy_ids[0] )
snake_case : List[str] = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
snake_case : List[Any] = Thread(target=model.generate ,kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
snake_case : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Any = -1
snake_case : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ )
snake_case : str = greedy_ids[:, input_ids.shape[1] :]
snake_case : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
snake_case : str = TextStreamer(SCREAMING_SNAKE_CASE_ ,skip_prompt=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ ,streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case : str = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
snake_case : Dict = AutoTokenizer.from_pretrained("""distilgpt2""" )
snake_case : Tuple = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = -1
snake_case : Any = torch.ones((1, 5) ,device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
snake_case : List[Any] = TextStreamer(SCREAMING_SNAKE_CASE_ ,skip_special_tokens=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=1 ,do_sample=SCREAMING_SNAKE_CASE_ ,streamer=SCREAMING_SNAKE_CASE_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
snake_case : Dict = cs.out[:-1] # Remove the final "\n"
snake_case : Tuple = tokenizer(SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = -1
snake_case : int = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ ,timeout=0.0_01 )
snake_case : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
snake_case : List[str] = Thread(target=model.generate ,kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
snake_case : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
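Outside the test harness, the streamer API works like this: `TextStreamer` prints to stdout during `generate`, while `TextIteratorStreamer` hands text chunks to a consumer thread. A sketch with a small public checkpoint:

```python
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
inputs = tokenizer(["The meaning of life is"], return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
thread.start()
for chunk in streamer:  # text arrives as tokens are generated
    print(chunk, end="", flush=True)
thread.join()
```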
| 36 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str] ) -> Any:
'''simple docstring'''
snake_case : Tuple = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case : Optional[Any] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case : Optional[int] = f"""{src_lang}-{tgt_lang}"""
snake_case : Any = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(__A , exist_ok=__A )
snake_case : Union[str, Any] = os.path.join(__A , """README.md""" )
print(f"""Generating {path}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(__A )
# make sure we are under the root of the project
__lowercase : int = Path(__file__).resolve().parent.parent.parent
__lowercase : List[str] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowercase , __lowercase , __lowercase : List[str] = model_name.split('''-''')
__lowercase : str = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowercase : List[str] = ['''text''', '''image''', '''audio''']
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__A , __A ):
inputs.append(create_inputs(__A ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def lowercase ( __A : List ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = []
for output in outputs:
if isinstance(__A , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__A , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__A , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _A :
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
snake_case : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input ,SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : str = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : Union[str, Any] = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) ,self.tool.outputs )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.outputs ):
snake_case : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
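The class is a mixin: a concrete tool test provides `self.tool` in `setUp` and inherits the input/output-type checks above. A sketch of such a subclass (the tool id and the mixin's de-obfuscated name, `ToolTesterMixin` in the transformers test suite, are assumptions):

```python
import unittest
from transformers import load_tool

class TranslationToolTest(_A, unittest.TestCase):  # `_A` is the mixin defined above
    def setUp(self):
        self.tool = load_tool("translation")  # assumed tool id
        self.tool.setup()
```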
| 36 |
__lowercase : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowercase : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowercase : List[str] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__lowercase : Any = logging.get_logger(__name__)
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
if not conversation_id:
snake_case : str = uuid.uuid4()
if past_user_inputs is None:
snake_case : Any = []
if generated_responses is None:
snake_case : Dict = []
snake_case : uuid.UUID = conversation_id
snake_case : List[str] = past_user_inputs
snake_case : List[str] = generated_responses
snake_case : Optional[str] = text
def __eq__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
snake_case : Optional[Any] = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
snake_case : Any = text
def snake_case_ ( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
snake_case : Tuple = None
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
self.generated_responses.append(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
snake_case : List[str] = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
snake_case : List[Any] = """user""" if is_user else """bot"""
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
snake_case , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
if self.tokenizer.pad_token_id is None:
snake_case : int = self.tokenizer.eos_token
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Union[str, Any] = {}
snake_case : str = {}
snake_case : List[Any] = {}
if min_length_for_response is not None:
snake_case : Optional[int] = min_length_for_response
if minimum_tokens is not None:
snake_case : Dict = minimum_tokens
if "max_length" in generate_kwargs:
snake_case : Optional[Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
snake_case : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(SCREAMING_SNAKE_CASE_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=0 ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ ,num_workers=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) == 1:
return outputs[0]
return outputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=32 ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ):
snake_case : Optional[Any] = self.tokenizer._build_conversation_input_ids(SCREAMING_SNAKE_CASE_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
snake_case : List[str] = self._legacy_parse_and_tokenize(SCREAMING_SNAKE_CASE_ )
if self.framework == "pt":
snake_case : Dict = torch.LongTensor([input_ids] )
elif self.framework == "tf":
snake_case : Dict = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=10 ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
snake_case : Any = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
snake_case : List[str] = max_length - minimum_tokens
snake_case : List[str] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
snake_case : Dict = model_inputs["""attention_mask"""][:, -trim:]
snake_case : Tuple = model_inputs.pop("""conversation""" )
snake_case : Tuple = max_length
snake_case : List[Any] = self.model.generate(**SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
if self.model.config.is_encoder_decoder:
snake_case : Tuple = 1
else:
snake_case : List[str] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=True ):
'''simple docstring'''
snake_case : Optional[int] = model_outputs["""output_ids"""]
snake_case : List[str] = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE_ ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ,)
snake_case : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(SCREAMING_SNAKE_CASE_ )
return conversation
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : int = self.tokenizer.eos_token_id
snake_case : Tuple = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > self.tokenizer.model_max_length:
snake_case : Tuple = input_ids[-self.tokenizer.model_max_length :]
return input_ids
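End to end, the pipeline above is driven with `Conversation` objects; each call consumes `new_user_input` and appends a generated response. A usage sketch (the checkpoint name is illustrative):

```python
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Going to the movies tonight, any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

conversation.add_user_input("Is it an action movie?")
conversation = chatbot(conversation)  # history is carried along in the object
```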
| 36 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" ,SCREAMING_SNAKE_CASE_ ,)
super().__init__(args=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 36 | 1 |
import requests
from bs4 import BeautifulSoup
def lowercase ( __A : str = "AAPL" ) -> str:
'''simple docstring'''
snake_case : List[Any] = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
snake_case : Dict = BeautifulSoup(requests.get(__A ).text , """html.parser""" )
snake_case : Optional[int] = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 36 | 1 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=99 ,SCREAMING_SNAKE_CASE_=13 ,SCREAMING_SNAKE_CASE_=7 ,SCREAMING_SNAKE_CASE_=9 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=32 ,SCREAMING_SNAKE_CASE_=5 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=37 ,SCREAMING_SNAKE_CASE_=8 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.0_02 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,):
'''simple docstring'''
snake_case : Tuple = parent
snake_case : Dict = batch_size
snake_case : Union[str, Any] = encoder_seq_length
snake_case : Optional[int] = decoder_seq_length
# For common tests
snake_case : str = self.decoder_seq_length
snake_case : Dict = is_training
snake_case : Any = use_attention_mask
snake_case : List[Any] = use_labels
snake_case : Dict = vocab_size
snake_case : Dict = hidden_size
snake_case : Dict = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = d_ff
snake_case : Dict = relative_attention_num_buckets
snake_case : List[Any] = dropout_rate
snake_case : int = initializer_factor
snake_case : int = eos_token_id
snake_case : str = pad_token_id
snake_case : List[str] = decoder_start_token_id
snake_case : Optional[int] = None
snake_case : Optional[Any] = decoder_layers
def snake_case_ ( self ):
'''simple docstring'''
return T5Config.from_pretrained("""google/umt5-base""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,):
'''simple docstring'''
if attention_mask is None:
snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case : Optional[int] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case : int = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=SCREAMING_SNAKE_CASE_ )
if decoder_head_mask is None:
snake_case : int = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=SCREAMING_SNAKE_CASE_ )
if cross_attn_head_mask is None:
snake_case : Any = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=SCREAMING_SNAKE_CASE_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case : Optional[Any] = input_ids.clamp(self.pad_token_id + 1 )
snake_case : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case : Optional[int] = self.get_config()
snake_case : Any = config.num_attention_heads
snake_case : List[str] = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
return config, input_dict
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case_ ( self ):
'''simple docstring'''
return T5Config(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def snake_case_ ( self ):
'''simple docstring'''
return T5Config(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Tuple = UMTaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : Tuple = model(
input_ids=SCREAMING_SNAKE_CASE_ ,decoder_input_ids=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,decoder_attention_mask=SCREAMING_SNAKE_CASE_ ,)
snake_case : Optional[Any] = model(input_ids=SCREAMING_SNAKE_CASE_ ,decoder_input_ids=SCREAMING_SNAKE_CASE_ )
snake_case : Any = result.last_hidden_state
snake_case : Dict = result.past_key_values
snake_case : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : int = UMTaModel(config=SCREAMING_SNAKE_CASE_ ).get_decoder().to(SCREAMING_SNAKE_CASE_ ).eval()
# first forward pass
snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ,use_cache=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = model(SCREAMING_SNAKE_CASE_ ,use_cache=SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) + 1 )
snake_case , snake_case : List[Any] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
snake_case : List[str] = ids_tensor((self.batch_size, 1) ,config.vocab_size )
        # append the new token to input_ids to build the next step's inputs
snake_case : int = torch.cat([input_ids, next_tokens] ,dim=-1 )
snake_case : List[str] = model(SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""]
snake_case : List[Any] = model(SCREAMING_SNAKE_CASE_ ,past_key_values=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""]
# select random slice
snake_case : Dict = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
snake_case : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,atol=1E-3 ) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = UMTaModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).half().eval()
snake_case : int = model(**SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE_ ).any().item() )
@require_torch
class _A ( snake_case , snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__lowerCamelCase : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict = True
__lowerCamelCase : str = False
__lowerCamelCase : Dict = False
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : str = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__lowerCamelCase : Union[str, Any] = [0.8, 0.9]
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
snake_case : List[Any] = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
SCREAMING_SNAKE_CASE_ ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=SCREAMING_SNAKE_CASE_ ,opset_version=9 ,input_names=["""input_ids""", """decoder_input_ids"""] ,)
    @unittest.skipIf(torch_device == """cpu""" ,"""Can't do half precision""" )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
snake_case : Any = self.model_tester.prepare_config_and_inputs()
snake_case : Dict = config_and_inputs[0]
snake_case : str = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval()
model.to(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = {
"""head_mask""": torch.zeros(config.num_layers ,config.num_heads ,device=SCREAMING_SNAKE_CASE_ ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers ,config.num_heads ,device=SCREAMING_SNAKE_CASE_ ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers ,config.num_heads ,device=SCREAMING_SNAKE_CASE_ ),
}
for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE_ ,head_masking.items() ):
snake_case : Any = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
snake_case : Optional[int] = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model.generate(
config_and_inputs[1]["""input_ids"""] ,num_beams=1 ,max_length=3 ,output_attentions=SCREAMING_SNAKE_CASE_ ,return_dict_in_generate=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
snake_case : List[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" ,return_dict=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""google/umt5-small""" ,use_fast=SCREAMING_SNAKE_CASE_ ,legacy=SCREAMING_SNAKE_CASE_ )
snake_case : str = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
snake_case : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" ,padding=SCREAMING_SNAKE_CASE_ ).input_ids
# fmt: off
        snake_case : Optional[int] = torch.tensor(
            [
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
# fmt: on
torch.testing.assert_allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : str = model.generate(input_ids.to(SCREAMING_SNAKE_CASE_ ) )
snake_case : Optional[Any] = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
snake_case : Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
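# A minimal, self-contained sketch (not part of the test suite above) of the
# cached-decoding equivalence the tests verify: run a toy causal attention layer once
# over the full sequence and once incrementally with cached keys/values, then check
# that the final-position outputs match. All sizes and weights are illustrative.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
dim, seq_len = 8, 5
wq, wk, wv = (torch.randn(dim, dim) for _ in range(3))
x = torch.randn(1, seq_len, dim)

# Full pass with a causal mask over all positions.
q, k, v = x @ wq, x @ wk, x @ wv
causal_mask = torch.triu(torch.full((seq_len, seq_len), float("-inf")), diagonal=1)
full = F.softmax(q @ k.transpose(-1, -2) / dim**0.5 + causal_mask, dim=-1) @ v

# Incremental pass: only the last token is fed, reusing cached keys/values
# (the role played by `past_key_values` in the tests above).
past_k, past_v = x[:, :-1] @ wk, x[:, :-1] @ wv
new_q = x[:, -1:] @ wq
k_cat = torch.cat([past_k, x[:, -1:] @ wk], dim=1)
v_cat = torch.cat([past_v, x[:, -1:] @ wv], dim=1)
incremental = F.softmax(new_q @ k_cat.transpose(-1, -2) / dim**0.5, dim=-1) @ v_cat

assert torch.allclose(full[:, -1:], incremental, atol=1e-5)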
| 36 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowercase : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase ( __A : Optional[Any] , __A : Optional[Any] ) -> str:
'''simple docstring'''
inspect_dataset(__A , __A )
snake_case : List[str] = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase ( __A : Optional[int] , __A : Any ) -> Optional[Any]:
'''simple docstring'''
inspect_metric(__A , __A )
snake_case : Any = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Tuple , __A : Dict , __A : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = get_dataset_config_info(__A , config_name=__A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Tuple , __A : Any , __A : List[str] ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_config_info(__A , config_name=__A )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase ( __A : Any , __A : Dict ) -> Dict:
'''simple docstring'''
snake_case : int = get_dataset_config_names(__A )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[Any] , __A : Dict , __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[Any] = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
snake_case : Any = expected_configs[0]
assert expected_config in infos
snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[int] , __A : Tuple , __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = get_dataset_infos(__A )
assert expected_config in infos
snake_case : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Optional[int] , __A : Any , __A : Dict ) -> int:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
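# A minimal usage sketch of the inspection helpers exercised above (assumes the
# `datasets` library is installed and the Hugging Face Hub is reachable).
from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")                        # e.g. ["plain_text"]
splits = get_dataset_split_names("squad", config_name=configs[0])  # e.g. ["train", "validation"]
print(configs, splits)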
| 36 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__lowercase : List[str] = logging.get_logger(__name__)
def lowercase ( ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
snake_case : str = json.loads(__A )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
snake_case : List[Any] = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
snake_case : int = json.loads(__A )
if not mpi_options.get("""sagemaker_mpi_enabled""" , __A ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : str = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def snake_case_ ( self ):
'''simple docstring'''
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" ,SCREAMING_SNAKE_CASE_ ,)
@cached_property
def snake_case_ ( self ):
'''simple docstring'''
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
snake_case : Tuple = torch.device("""cpu""" )
snake_case : Optional[Any] = 0
elif is_sagemaker_model_parallel_available():
snake_case : Tuple = smp.local_rank()
snake_case : List[str] = torch.device("""cuda""" ,SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" ,timeout=self.ddp_timeout_delta )
snake_case : Union[str, Any] = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
snake_case : List[str] = torch.device("""cuda""" ,self.local_rank )
snake_case : Tuple = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
snake_case : Any = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
snake_case : Optional[int] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" ,timeout=self.ddp_timeout_delta )
snake_case : Tuple = torch.device("""cuda""" ,self.local_rank )
snake_case : Optional[int] = 1
if device.type == "cuda":
torch.cuda.set_device(SCREAMING_SNAKE_CASE_ )
return device
@property
def snake_case_ ( self ):
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def snake_case_ ( self ):
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def snake_case_ ( self ):
'''simple docstring'''
return False
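# A minimal sketch of the environment-variable probing implemented above: SageMaker
# passes launcher settings as JSON strings in env vars, so the availability checks
# reduce to "parse the JSON and look for a key". The values below are illustrative.
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2, "microbatches": 4}'
smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
print("model parallel requested:", "partitions" in smp_options)  # True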
| 36 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase : Optional[Any] = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''albert'''
def __init__( self ,SCREAMING_SNAKE_CASE_=30000 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=12 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=64 ,SCREAMING_SNAKE_CASE_=16384 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_="gelu_new" ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=1E-12 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_="absolute" ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ ,bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = vocab_size
snake_case : int = embedding_size
snake_case : int = hidden_size
snake_case : List[Any] = num_hidden_layers
snake_case : int = num_hidden_groups
snake_case : List[str] = num_attention_heads
snake_case : List[str] = inner_group_num
snake_case : Any = hidden_act
snake_case : Any = intermediate_size
snake_case : Union[str, Any] = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Tuple = max_position_embeddings
snake_case : Any = type_vocab_size
snake_case : Optional[Any] = initializer_range
snake_case : int = layer_norm_eps
snake_case : Optional[int] = classifier_dropout_prob
snake_case : str = position_embedding_type
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36 | 1 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    '''Project Euler 71: return the numerator of the reduced proper fraction
    immediately to the left of numerator/denominator when listing all fractions
    with denominators up to `limit` in ascending order.'''
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
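# Sanity check from the Project Euler 71 statement: among fractions with
# denominators up to 8, the one immediately to the left of 3/7 is 2/5.
assert solution(numerator=3, denominator=7, limit=8) == 2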
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 36 |
from __future__ import annotations
def average(nums: list) -> float:
    '''Return the arithmetic mean of a non-empty list of numbers.

    >>> average([1, 2, 3, 4])
    2.5
    '''
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
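# A minimal, standalone sketch of the optional-dependency pattern used throughout
# this module: probe for a backend and import the real implementations only when the
# probe succeeds, otherwise fall back to dummy objects that raise on use. The helper
# name below is an illustrative stand-in for `is_scipy_available` and friends.
import importlib.util

def backend_available(name: str) -> bool:
    # `find_spec` returns None when the package is not importable.
    return importlib.util.find_spec(name) is not None

if backend_available("scipy"):
    import scipy  # noqa: F401
print(backend_available("scipy"))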
| 36 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Tuple = '''vision-encoder-decoder'''
__lowerCamelCase : List[Any] = True
def __init__( self ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
snake_case : Union[str, Any] = kwargs.pop("""encoder""" )
snake_case : Any = encoder_config.pop("""model_type""" )
snake_case : Optional[Any] = kwargs.pop("""decoder""" )
snake_case : Union[str, Any] = decoder_config.pop("""model_type""" )
snake_case : Any = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : int = True
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
snake_case : Tuple = True
snake_case : Union[str, Any] = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ )
snake_case : Union[str, Any] = self.encoder.to_dict()
snake_case : Union[str, Any] = self.decoder.to_dict()
snake_case : Dict = self.__class__.model_type
return output
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = OrderedDict()
snake_case : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Optional[Any] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
import torch
snake_case : Optional[Any] = OrderedDict()
snake_case : Tuple = super().generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,seq_length=SCREAMING_SNAKE_CASE_ ,is_pair=SCREAMING_SNAKE_CASE_ ,framework=SCREAMING_SNAKE_CASE_ )
snake_case , snake_case : List[Any] = dummy_input["""input_ids"""].shape
snake_case : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
snake_case : List[str] = dummy_input.pop("""input_ids""" )
snake_case : int = dummy_input.pop("""attention_mask""" )
snake_case : Dict = torch.zeros(SCREAMING_SNAKE_CASE_ )
return common_inputs
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "default" ):
'''simple docstring'''
snake_case : int = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
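# A minimal usage sketch of the composite-config pattern implemented above, using the
# upstream `transformers` class names rather than the obfuscated `_A` wrappers
# (assumes `transformers` is installed).
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

composite = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
print(composite.decoder.is_decoder, composite.decoder.add_cross_attention)  # True True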
| 36 | 1 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
__lowerCamelCase : Optional[str] = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
__lowerCamelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
__lowerCamelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
__lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
__lowerCamelCase : Optional[int] = field(
default=1_0_0_0_0 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    __lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'''help''': '''Learning rate for training.'''} )
    __lowerCamelCase : Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate schedule type.'''} )
__lowerCamelCase : Optional[int] = field(
default=7_5_0 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
__lowerCamelCase : Optional[int] = field(
default=1_6 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
__lowerCamelCase : Optional[bool] = field(
default=snake_case , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
__lowerCamelCase : Optional[int] = field(default=5_0_0_0_0 , metadata={'''help''': '''Maximum number of training steps.'''} )
__lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
__lowerCamelCase : Optional[int] = field(default=1_0_2_4 , metadata={'''help''': '''Sequence lengths used for training.'''} )
__lowerCamelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} )
__lowerCamelCase : Optional[int] = field(
default=1_0_2_4 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
__lowerCamelCase : Optional[bool] = field(default=snake_case , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
__lowerCamelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
__lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
__lowerCamelCase : Optional[int] = field(default=1_0_2_4 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
__lowerCamelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
__lowerCamelCase : Optional[int] = field(default=snake_case , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
__lowerCamelCase : Optional[int] = field(
default=snake_case , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
__lowerCamelCase : Optional[bool] = field(
default=snake_case , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
__lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
__lowerCamelCase : Optional[int] = field(default=2_5_6 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
__lowerCamelCase : Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
__lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
__lowerCamelCase : Optional[int] = field(default=1_0 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
__lowerCamelCase : Optional[int] = field(
default=2_0_0 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
__lowerCamelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
__lowerCamelCase : Optional[str] = field(
        default='''eval_results.json''' , metadata={'''help''': '''File in which the evaluation results are saved.'''} )
__lowerCamelCase : Optional[str] = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
__lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[int] = field(
default=snake_case , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
__lowerCamelCase : Optional[str] = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
__lowerCamelCase : Optional[str] = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
__lowerCamelCase : Optional[int] = field(
default=1_0_0_0_0_0 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
__lowerCamelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__lowerCamelCase : Optional[float] = field(
default=1_0_0_0 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
__lowerCamelCase : Optional[float] = field(
default=1_0_0 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
__lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
__lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
__lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
__lowerCamelCase : Optional[bool] = field(
default=snake_case , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
__lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
__lowerCamelCase : Optional[str] = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
__lowerCamelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__lowerCamelCase : Optional[int] = field(default=2_0_0_0_0_0 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
__lowerCamelCase : Optional[int] = field(
        default=3_2_7_6_8 , metadata={'''help''': '''Target vocabulary size of the new tokenizer.'''} )
__lowerCamelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
__lowerCamelCase : Optional[bool] = field(default=snake_case , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
__lowerCamelCase : Optional[str] = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
__lowerCamelCase : Optional[int] = field(default=snake_case , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
__lowerCamelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
__lowerCamelCase : Optional[bool] = field(default=snake_case , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
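# A minimal sketch of how dataclass argument containers like the ones above are
# typically consumed, via the upstream `HfArgumentParser`. The demo dataclass and
# the CLI values are illustrative assumptions.
from dataclasses import dataclass as demo_dataclass, field as demo_field
from transformers import HfArgumentParser

@demo_dataclass
class DemoArgs:
    learning_rate: float = demo_field(default=2e-4, metadata={"help": "Learning rate for training."})

(demo_args,) = HfArgumentParser(DemoArgs).parse_args_into_dataclasses(["--learning_rate", "1e-3"])
print(demo_args.learning_rate)  # 0.001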
| 36 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def lowercase ( __A : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = """huggingface/label-files"""
snake_case : int = """imagenet-1k-id2label.json"""
snake_case : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
snake_case : Any = {int(__A ): v for k, v in idalabel.items()}
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : Any = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case : List[Any] = BitConfig(
conv_layer=__A , num_labels=1000 , idalabel=__A , labelaid=__A , )
return config
def lowercase ( __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "stem.conv" in name:
snake_case : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
snake_case : List[str] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
snake_case : Optional[int] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
snake_case : Optional[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
snake_case : Tuple = """bit.encoder.""" + name
return name
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowercase ( __A : Any , __A : Union[str, Any] , __A : str=False ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_config(__A )
# load original model from timm
snake_case : Tuple = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
snake_case : List[str] = timm_model.state_dict()
for key in state_dict.copy().keys():
snake_case : List[Any] = state_dict.pop(__A )
snake_case : Union[str, Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
snake_case : List[Any] = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
snake_case : Dict = create_transform(**resolve_data_config({} , model=__A ) )
snake_case : Optional[Any] = transform.transforms
snake_case : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case : Union[str, Any] = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case : Dict = prepare_img()
snake_case : List[str] = transform(__A ).unsqueeze(0 )
snake_case : int = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
snake_case : Optional[int] = model(__A )
snake_case : Dict = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case : int = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
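# A minimal, self-contained sketch of the conversion core above: walk a timm-style
# state_dict, rename keys with string substitutions, and keep the tensors untouched.
# The two example keys are illustrative.
import torch as _torch

timm_state_dict = {"stem.conv.weight": _torch.zeros(3), "head.fc.bias": _torch.zeros(3)}
hf_state_dict = {
    key.replace("stem.conv", "bit.embedder.convolution").replace("head.fc", "classifier.1"): value
    for key, value in timm_state_dict.items()
}
print(sorted(hf_state_dict))  # ['bit.embedder.convolution.weight', 'classifier.1.bias']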
| 36 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowercase : Any = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowercase : int = parser.parse_args()
__lowercase : int = '''cpu'''
__lowercase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__lowercase : Optional[Any] = '''path-to-your-trained-model'''
__lowercase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowercase : Dict = pipe.to(device)
# to channels last
__lowercase : str = pipe.unet.to(memory_format=torch.channels_last)
__lowercase : Dict = pipe.vae.to(memory_format=torch.channels_last)
__lowercase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowercase : Optional[int] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowercase : Optional[int] = torch.randn(2, 4, 64, 64)
__lowercase : Optional[int] = torch.rand(1) * 999
__lowercase : Optional[int] = torch.randn(2, 77, 768)
__lowercase : Any = (sample, timestep, encoder_hidden_status)
try:
__lowercase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowercase : Any = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowercase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowercase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowercase : Tuple = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowercase : List[str] = 666
__lowercase : Union[str, Any] = torch.Generator(device).manual_seed(seed)
__lowercase : Any = {'''generator''': generator}
if args.steps is not None:
__lowercase : str = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowercase : Dict = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
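# A minimal sketch of the channels-last conversion applied above: storing weights and
# activations in NHWC order lets CPU convolution kernels take their faster path.
# Plain PyTorch is enough for this part (no IPEX), and the shapes are illustrative.
import torch as _torch

_conv = _torch.nn.Conv2d(4, 8, kernel_size=3).to(memory_format=_torch.channels_last)
_x = _torch.randn(1, 4, 64, 64).to(memory_format=_torch.channels_last)
print(_conv(_x).is_contiguous(memory_format=_torch.channels_last))  # expected: True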
| 36 |
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1''' # default region
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : Dict = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
__lowerCamelCase : Optional[Any] = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
__lowerCamelCase : List[str] = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
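# A minimal sketch of the class-scoped fixture pattern above: pytest builds the
# environment once per test class and exposes it through `request.cls`. The demo
# class and the attribute name `env` are illustrative assumptions.
class _DemoEnv:
    def __init__(self, framework: str) -> None:
        self.framework = framework

@pytest.fixture(scope="class")
def demo_sagemaker_env(request):
    # Attach the environment to the test class so its test methods can read it.
    request.cls.env = _DemoEnv(framework=getattr(request.cls, "framework", "pytorch"))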
| 36 | 1 |
from ... import PretrainedConfig
__lowercase : Union[str, Any] = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__lowerCamelCase : int = '''nezha'''
def __init__( self ,SCREAMING_SNAKE_CASE_=21128 ,SCREAMING_SNAKE_CASE_=768 ,SCREAMING_SNAKE_CASE_=12 ,SCREAMING_SNAKE_CASE_=12 ,SCREAMING_SNAKE_CASE_=3072 ,SCREAMING_SNAKE_CASE_="gelu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=64 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=1E-12 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ ,bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : int = vocab_size
snake_case : str = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : int = hidden_act
snake_case : str = intermediate_size
snake_case : Tuple = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : Union[str, Any] = max_position_embeddings
snake_case : Tuple = max_relative_position
snake_case : List[Any] = type_vocab_size
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[int] = layer_norm_eps
snake_case : Optional[int] = classifier_dropout
snake_case : Optional[int] = use_cache
| 36 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 1 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    '''Binarize a greyscale PIL image in place around its mean pixel value.'''
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
    image.save('''output_image_path''')
| 36 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowercase : Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : List[int]
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[Union[int, float]] = None
__lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : str = hans_processors[task]()
snake_case : str = os.path.join(
SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
snake_case : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : List[Any] = label_list[2], label_list[1]
snake_case : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case : Any = cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
snake_case : Union[str, Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
)
logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : Any = hans_processors[task]()
snake_case : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : int = label_list[2], label_list[1]
snake_case : List[str] = label_list
snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case : Any = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE_ ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case_ ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
class HansProcessor ( snake_case ):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Convert a list of InputExamples into InputFeatures usable by a model."""
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
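# A minimal usage sketch of the pieces above (hedged: the checkpoint name and
# data directory below are illustrative assumptions, not taken from this file):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # hypothetical checkpoint
#   processor = hans_processors["hans"]()
#   examples = processor.get_dev_examples("./hans_data")  # hypothetical path
#   features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)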
| 36 | 1 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    """Close stale issues and flag inactive ones on huggingface/accelerate."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
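# A dry run of the policy above can be sketched by swapping the two side
# effects for prints (assumption: same GITHUB_TOKEN / repo wiring as in main()):
#
#   issue.edit(state="closed")   ->  print(f"would close #{issue.number}")
#   issue.create_comment(...)    ->  print(f"would mark #{issue.number} as stale")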
| 36 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order, by trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
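    # Worked example (sanity check of the trial-division loop above):
    # 360 = 2 * 2 * 2 * 3 * 3 * 5
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]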
| 36 | 1 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__lowercase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(snake_case )
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
requires_backends(self ,"""vision""" )
requires_backends(self ,"""torch""" )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Any = {}
snake_case : Tuple = {}
snake_case : Any = {}
# preprocess args
if "points_per_batch" in kwargs:
snake_case : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
snake_case : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
snake_case : Optional[Any] = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
snake_case : Optional[int] = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
snake_case : Dict = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
snake_case : List[Any] = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
snake_case : Tuple = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
snake_case : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
snake_case : Union[str, Any] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
snake_case : Tuple = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
snake_case : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
snake_case : List[Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self ,SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return super().__call__(SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ ,num_workers=SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=64 ,SCREAMING_SNAKE_CASE_ = 0 ,SCREAMING_SNAKE_CASE_ = 512 / 1500 ,SCREAMING_SNAKE_CASE_ = 32 ,SCREAMING_SNAKE_CASE_ = 1 ,):
'''simple docstring'''
snake_case : Dict = load_image(SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = self.image_processor.size["""longest_edge"""]
snake_case , snake_case , snake_case , snake_case : int = self.image_processor.generate_crop_boxes(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : int = self.image_processor(images=SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
snake_case : Dict = self.get_inference_context()
with inference_context():
snake_case : str = self._ensure_tensor_on_device(SCREAMING_SNAKE_CASE_ ,device=self.device )
snake_case : Optional[Any] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
snake_case : Union[str, Any] = image_embeddings
snake_case : Optional[int] = grid_points.shape[1]
snake_case : Dict = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = grid_points[:, i : i + points_per_batch, :, :]
snake_case : Dict = input_labels[:, i : i + points_per_batch]
snake_case : int = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=0.88 ,SCREAMING_SNAKE_CASE_=0.95 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=1 ,):
'''simple docstring'''
snake_case : List[Any] = model_inputs.pop("""input_boxes""" )
snake_case : List[Any] = model_inputs.pop("""is_last""" )
snake_case : Any = model_inputs.pop("""original_sizes""" ).tolist()
snake_case : Dict = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
snake_case : List[Any] = self.model(**SCREAMING_SNAKE_CASE_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
snake_case : Any = model_outputs["""pred_masks"""]
snake_case : Optional[int] = self.image_processor.post_process_masks(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,binarize=SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = model_outputs["""iou_scores"""]
snake_case , snake_case , snake_case : Dict = self.image_processor.filter_masks(
masks[0] ,iou_scores[0] ,original_sizes[0] ,input_boxes[0] ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,)
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=0.7 ,):
'''simple docstring'''
snake_case : List[Any] = []
snake_case : Any = []
snake_case : Optional[int] = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
snake_case : int = torch.cat(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = torch.cat(SCREAMING_SNAKE_CASE_ )
snake_case , snake_case , snake_case , snake_case : List[str] = self.image_processor.post_process_for_mask_generation(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Any = defaultdict(SCREAMING_SNAKE_CASE_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(SCREAMING_SNAKE_CASE_ )
snake_case : str = {}
if output_rle_mask:
snake_case : Optional[int] = rle_mask
if output_bboxes_mask:
snake_case : Dict = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 36 |
import numpy as np
def lowercase ( vector : np.ndarray ) -> np.ndarray:
    """Tanh activation written via its sigmoid form: tanh(x) = 2 / (1 + exp(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
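    # Numeric check: the closed form above matches np.tanh elementwise,
    # since tanh(x) = 2 / (1 + exp(-2x)) - 1.
    assert np.allclose(lowercase(np.array([-1.0, 0.0, 1.0])), np.tanh([-1.0, 0.0, 1.0]))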
| 36 | 1 |
from math import pi
def arc_length ( radius : int , angle : int ) -> float:
    """Length of a circular arc of the given radius subtending ``angle`` degrees."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
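    # Expected output: 2 * pi * 90 * (10 / 360) = 5 * pi, roughly 15.70796.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9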
| 36 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    """Map a TF PEGASUS weight name onto the corresponding HF state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PyTorch PEGASUS model and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Read every non-optimizer variable out of a TF checkpoint into a dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """Convert a TF PEGASUS checkpoint plus tokenizer and save both under ``save_dir``."""
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
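# Hypothetical invocation (the script name and checkpoint path are illustrative,
# assuming this file is saved as convert_pegasus_tf_to_pytorch.py):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc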
| 36 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=13 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=224 ,SCREAMING_SNAKE_CASE_=1000 ,SCREAMING_SNAKE_CASE_=[3, 3, 6, 4] ,SCREAMING_SNAKE_CASE_=[48, 56, 112, 220] ,):
'''simple docstring'''
snake_case : int = parent
snake_case : List[str] = batch_size
snake_case : List[Any] = num_channels
snake_case : List[Any] = is_training
snake_case : Dict = use_labels
snake_case : str = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : int = num_labels
snake_case : Optional[Any] = image_size
snake_case : Optional[Any] = layer_depths
snake_case : int = embed_dims
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Optional[Any] = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] ,self.num_labels )
snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ):
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths ,embed_dims=self.embed_dims ,mlp_ratio=4 ,downsamples=[True, True, True, True] ,hidden_act="""gelu""" ,num_labels=self.num_labels ,down_patch_size=3 ,down_stride=2 ,down_pad=1 ,drop_rate=0.0 ,drop_path_rate=0.0 ,use_layer_scale=SCREAMING_SNAKE_CASE_ ,layer_scale_init_value=1E-5 ,)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = SwiftFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : List[str] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dims[-1], 7, 7) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : str = self.num_labels
snake_case : List[str] = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : Tuple = model(SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
snake_case : Optional[int] = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : List[str] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case_ ( self ):
'''simple docstring'''
((snake_case) , (snake_case) , (snake_case)) : Dict = self.prepare_config_and_inputs()
snake_case : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__lowerCamelCase : Dict = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : str = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[Any] = False
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = SwiftFormerModelTester(self )
snake_case : List[str] = ConfigTester(
self ,config_class=SCREAMING_SNAKE_CASE_ ,has_text_modality=SCREAMING_SNAKE_CASE_ ,hidden_size=37 ,num_attention_heads=12 ,num_hidden_layers=12 ,)
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ ,nn.Linear ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : List[Any] = [*signature.parameters.keys()]
snake_case : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : List[str] = SwiftFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
snake_case : Dict = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = outputs.hidden_states
snake_case : List[Any] = 8
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertEqual(
hidden_states[i].shape ,torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) ,)
snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
def _config_zero_init(SCREAMING_SNAKE_CASE_ ):
snake_case : int = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,1E-10 )
if isinstance(getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ):
snake_case : List[Any] = _config_zero_init(getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
setattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
return configs_no_init
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
snake_case : Optional[Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def prepare_img():
    """Load the COCO test fixture used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_ ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 36 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires a forward method
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
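# Hypothetical invocation (the script name and paths are illustrative):
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa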
| 36 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowercase : Dict = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ['''pixel_values''']
def __init__( self ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = 1 / 255 ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = size if size is not None else {"""shortest_edge""": 224}
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : Optional[Any] = do_resize
snake_case : Union[str, Any] = size
snake_case : Dict = resample
snake_case : Dict = do_rescale
snake_case : Dict = rescale_factor
snake_case : List[str] = do_center_crop
snake_case : Dict = crop_size
snake_case : Any = do_flip_channel_order
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ ,size=size["""shortest_edge"""] ,default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_ ,size=(size["""height"""], size["""width"""]) ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
return flip_channel_order(SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case : List[str] = resample if resample is not None else self.resample
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
snake_case : Tuple = size if size is not None else self.size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else self.crop_size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
snake_case : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
snake_case : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
snake_case : Optional[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
snake_case : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
snake_case : Optional[int] = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : List[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
snake_case : int = target_sizes.numpy()
snake_case : Optional[Any] = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
snake_case : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
snake_case : Tuple = logits.argmax(dim=1 )
snake_case : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
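# A minimal usage sketch (hedged: the class name below follows this file's
# obfuscated `_A` naming, and the input is a random array, purely illustrative):
#
#   import numpy as np
#   processor = _A(size={"shortest_edge": 224}, crop_size={"height": 256, "width": 256})
#   batch = processor(images=np.zeros((300, 400, 3), dtype=np.uint8), return_tensors="np")
#   print(batch["pixel_values"].shape)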
| 36 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
'''simple docstring'''
snake_case : int = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
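# Worked example of the token-level F1 above: with gold "the cat sat" and
# prediction "cat sat on mat", normalization drops the article, leaving gold
# tokens [cat, sat] and predicted tokens [cat, sat, on, mat]; num_same = 2,
# precision = 2/4, recall = 2/2, so F1 = 2 * (0.5 * 1.0) / 1.5 = 2/3.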
def get_raw_scores(dataset, preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
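# Hypothetical invocation (the script and file names are illustrative, assuming
# this file is saved as evaluate_squad_v2.py):
#
#   python evaluate_squad_v2.py data.json pred.json -o eval.json -n na_prob.json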
| 36 | 1 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    # `load_labels` is a descriptive name chosen here; the original name was obfuscated.
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckpt_path):
    # `load_checkpoint` is a descriptive name chosen here; the original name was obfuscated.
    r = OrderedDict()
    with open(ckpt_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class _A :
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = {}
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "root" ,SCREAMING_SNAKE_CASE_=0 ):
'''simple docstring'''
snake_case : Any = name
snake_case : Union[str, Any] = level
snake_case : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
snake_case : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = Config(SCREAMING_SNAKE_CASE_ ,name=SCREAMING_SNAKE_CASE_ ,level=level + 1 )
snake_case : Tuple = v
setattr(self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = d
def __repr__( self ):
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = val
snake_case : Any = val
snake_case : Tuple = key.split(""".""" )
snake_case : List[str] = len(SCREAMING_SNAKE_CASE_ ) - 1
snake_case : str = self._pointer
if len(SCREAMING_SNAKE_CASE_ ) > 1:
for i, l in enumerate(SCREAMING_SNAKE_CASE_ ):
if hasattr(self ,SCREAMING_SNAKE_CASE_ ) and isinstance(getattr(self ,SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ):
setattr(getattr(self ,SCREAMING_SNAKE_CASE_ ) ,""".""".join(levels[i:] ) ,SCREAMING_SNAKE_CASE_ )
if l == last_level:
snake_case : List[Any] = val
else:
snake_case : Optional[int] = pointer[l]
def snake_case_ ( self ):
'''simple docstring'''
return self._pointer
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(F"""{file_name}""" ,"""w""" ) as stream:
dump(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(F"""{file_name}""" ,"""w""" ) as stream:
json.dump(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ ) as stream:
snake_case : Optional[int] = load(SCREAMING_SNAKE_CASE_ ,Loader=SCREAMING_SNAKE_CASE_ )
return data
def __str__( self ):
'''simple docstring'''
snake_case : Tuple = """ """
if self._name != "root":
snake_case : List[str] = F"""{t * (self._level-1)}{self._name}:\n"""
else:
snake_case : Dict = """"""
snake_case : Dict = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
r += F"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += F"""{t * (self._level)}{k}: {v} ({type(SCREAMING_SNAKE_CASE_ ).__name__})\n"""
snake_case : Tuple = level
return r[:-1]
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case , snake_case : str = cls.get_config_dict(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
return cls(SCREAMING_SNAKE_CASE_ )
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : int = kwargs.pop("""cache_dir""" ,SCREAMING_SNAKE_CASE_ )
snake_case : Dict = kwargs.pop("""force_download""" ,SCREAMING_SNAKE_CASE_ )
snake_case : Dict = kwargs.pop("""resume_download""" ,SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = kwargs.pop("""proxies""" ,SCREAMING_SNAKE_CASE_ )
snake_case : Any = kwargs.pop("""local_files_only""" ,SCREAMING_SNAKE_CASE_ )
if os.path.isdir(SCREAMING_SNAKE_CASE_ ):
snake_case : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
elif os.path.isfile(SCREAMING_SNAKE_CASE_ ) or is_remote_url(SCREAMING_SNAKE_CASE_ ):
snake_case : Tuple = pretrained_model_name_or_path
else:
snake_case : List[Any] = hf_bucket_url(SCREAMING_SNAKE_CASE_ ,filename=SCREAMING_SNAKE_CASE_ ,use_cdn=SCREAMING_SNAKE_CASE_ )
try:
# Load from URL or cache if already cached
snake_case : Optional[int] = cached_path(
SCREAMING_SNAKE_CASE_ ,cache_dir=SCREAMING_SNAKE_CASE_ ,force_download=SCREAMING_SNAKE_CASE_ ,proxies=SCREAMING_SNAKE_CASE_ ,resume_download=SCREAMING_SNAKE_CASE_ ,local_files_only=SCREAMING_SNAKE_CASE_ ,)
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
snake_case : List[str] = Config.load_yaml(SCREAMING_SNAKE_CASE_ )
except EnvironmentError:
snake_case : List[Any] = """Can't load config for"""
raise EnvironmentError(SCREAMING_SNAKE_CASE_ )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(SCREAMING_SNAKE_CASE_ ), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape, na[0, 0, :5])
    print(nb.shape, nb[0, 0, :5])
    assert np.allclose(na, nb, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(na, nb, rtol=0.01, atol=0.1).flatten() if x is False])/len(na.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename: str) -> bool:
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowercase ( __A : Tuple , __A : Tuple=None , __A : str=False , __A : Union[str, Any]=None , __A : Optional[Any]=10 , __A : int=False , __A : Dict=None , __A : Optional[int]=False , ) -> Dict:
'''simple docstring'''
if cache_dir is None:
snake_case : List[Any] = TRANSFORMERS_CACHE
if isinstance(__A , __A ):
snake_case : str = str(__A )
os.makedirs(__A , exist_ok=__A )
snake_case : List[str] = None
if not local_files_only:
try:
snake_case : int = requests.head(__A , allow_redirects=__A , proxies=__A , timeout=__A )
if response.status_code == 200:
snake_case : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
snake_case : Optional[int] = url_to_filename(__A , __A )
# get cache path to put the file
snake_case : Any = os.path.join(__A , __A )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__A ):
return cache_path
else:
snake_case : Tuple = [
file
for file in fnmatch.filter(os.listdir(__A ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__A ) > 0:
return os.path.join(__A , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__A ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
snake_case : Dict = cache_path + """.lock"""
with FileLock(__A ):
# If the download just completed while the lock was activated.
if os.path.exists(__A ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
snake_case : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__A , """a+b""" ) as f:
yield f
snake_case : List[str] = _resumable_file_manager
if os.path.exists(__A ):
snake_case : Dict = os.stat(__A ).st_size
else:
snake_case : Optional[int] = 0
else:
snake_case : Dict = partial(tempfile.NamedTemporaryFile , dir=__A , delete=__A )
snake_case : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" % (__A , temp_file.name) )
http_get(
__A , __A , proxies=__A , resume_size=__A , user_agent=__A , )
os.replace(temp_file.name , __A )
snake_case : Any = {"""url""": url, """etag""": etag}
snake_case : Dict = cache_path + """.json"""
with open(__A , """w""" ) as meta_file:
json.dump(__A , __A )
return cache_path
def lowercase ( __A : Optional[int] , __A : List[Any]=None ) -> str:
'''simple docstring'''
snake_case : List[Any] = url.encode("""utf-8""" )
snake_case : str = shaaaa(__A )
snake_case : Optional[Any] = url_hash.hexdigest()
if etag:
snake_case : str = etag.encode("""utf-8""" )
snake_case : Optional[Any] = shaaaa(__A )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
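# Naming sketch for the cache layout above: the filename is sha256(url),
# optionally extended with "." + sha256(etag), keeping an ".h5" suffix, e.g.
#   url_to_filename("https://example.com/model.h5", etag='"abc"')
#   -> "<sha256 of url>.<sha256 of etag>.h5"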
def lowercase ( __A : Optional[int] , __A : List[str]=None , __A : Union[str, Any]=False , __A : Optional[int]=None , __A : Union[str, Any]=False , __A : Union[str, Any]=None , __A : List[str]=False , __A : str=False , __A : Union[str, Any]=False , ) -> List[str]:
'''simple docstring'''
if cache_dir is None:
snake_case : Any = TRANSFORMERS_CACHE
if isinstance(__A , __A ):
snake_case : Tuple = str(__A )
if isinstance(__A , __A ):
snake_case : int = str(__A )
if is_remote_url(__A ):
# URL, so get it from the cache (downloading if necessary)
snake_case : List[Any] = get_from_cache(
__A , cache_dir=__A , force_download=__A , proxies=__A , resume_download=__A , user_agent=__A , local_files_only=__A , )
elif os.path.exists(__A ):
# File, and it exists.
snake_case : Tuple = url_or_filename
elif urlparse(__A ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__A ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__A ) )
if extract_compressed_file:
if not is_zipfile(__A ) and not tarfile.is_tarfile(__A ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
snake_case , snake_case : Dict = os.path.split(__A )
snake_case : int = output_file.replace(""".""" , """-""" ) + """-extracted"""
snake_case : Any = os.path.join(__A , __A )
if os.path.isdir(__A ) and os.listdir(__A ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
snake_case : int = output_path + """.lock"""
with FileLock(__A ):
shutil.rmtree(__A , ignore_errors=__A )
os.makedirs(__A )
if is_zipfile(__A ):
with ZipFile(__A , """r""" ) as zip_file:
zip_file.extractall(__A )
zip_file.close()
elif tarfile.is_tarfile(__A ):
snake_case : Optional[int] = tarfile.open(__A )
tar_file.extractall(__A )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__A ) )
return output_path_extracted
return output_path
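# Usage sketch (URLs and paths are illustrative): resolve either a remote URL
# or a local path to a file on disk, downloading into the cache when needed:
#   local_file = cached_path("https://example.com/config.json")
#   local_file = cached_path("./config.json")  # returned as-is if it exists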
def lowercase ( __A : Optional[int] , __A : str="," ) -> List[Any]:
'''simple docstring'''
assert isinstance(__A , __A )
if os.path.isfile(__A ):
with open(__A ) as f:
snake_case : Any = eval(f.read() )
else:
snake_case : Any = requests.get(__A )
try:
snake_case : Union[str, Any] = req.json()
except Exception:
snake_case : Optional[int] = req.content.decode()
assert data is not None, "could not connect"
try:
snake_case : str = eval(__A )
except Exception:
snake_case : Optional[Any] = data.split("""\n""" )
req.close()
return data
def lowercase ( __A : Dict ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = requests.get(__A )
snake_case : Dict = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( __A : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case : int = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__A )
with open(__A , """rb""" ) as stream:
snake_case : str = pkl.load(__A )
snake_case : Optional[int] = weights.pop("""model""" )
snake_case : List[Any] = {}
for k, v in model.items():
snake_case : Tuple = torch.from_numpy(v )
if "running_var" in k:
snake_case : Optional[int] = torch.tensor([0] )
snake_case : Optional[Any] = k.replace("""running_var""" , """num_batches_tracked""" )
snake_case : Any = zero
return new
def lowercase ( ) -> List[Any]:
'''simple docstring'''
print(f"""{os.path.abspath(os.path.join(__A , os.pardir ) )}/demo.ipynb""" )
def lowercase ( __A : str , __A : Dict="RGB" ) -> str:
'''simple docstring'''
assert isinstance(__A , __A )
if os.path.isfile(__A ):
snake_case : List[Any] = cva.imread(__A )
else:
snake_case : Optional[int] = get_image_from_url(__A )
assert img is not None, f"""could not connect to: {im}"""
snake_case : Optional[Any] = cva.cvtColor(__A , cva.COLOR_BGR2RGB )
if input_format == "RGB":
snake_case : str = img[:, :, ::-1]
return img
def lowercase ( __A : Union[str, Any] , __A : Union[str, Any]=1 ) -> List[Any]:
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(__A ) , __A ))
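# e.g. chunking images = [1, 2, 3, 4, 5] with batch=2 yields [1, 2], [3, 4], [5].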
| 36 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowercase : Dict = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ['''pixel_values''']
def __init__( self ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = 1 / 255 ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = size if size is not None else {"""shortest_edge""": 224}
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : Optional[Any] = do_resize
snake_case : Union[str, Any] = size
snake_case : Dict = resample
snake_case : Dict = do_rescale
snake_case : Dict = rescale_factor
snake_case : List[str] = do_center_crop
snake_case : Dict = crop_size
snake_case : Any = do_flip_channel_order
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ ,size=size["""shortest_edge"""] ,default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_ ,size=(size["""height"""], size["""width"""]) ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
return flip_channel_order(SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case : List[str] = resample if resample is not None else self.resample
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
snake_case : Tuple = size if size is not None else self.size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else self.crop_size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
snake_case : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
snake_case : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
snake_case : Optional[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
snake_case : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
snake_case : Optional[int] = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : List[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : int = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ ,tensor_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
snake_case : int = target_sizes.numpy()
snake_case : Optional[Any] = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
snake_case : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
snake_case : Tuple = logits.argmax(dim=1 )
snake_case : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
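# Usage sketch (given the BGR channel flip this is assumed to be the MobileViT
# image processor; names follow the deobfuscated transformers API and the image
# path is illustrative):
#   from PIL import Image
#   processor = MobileViTImageProcessor()  # resize shortest edge, center crop, rescale, flip RGB -> BGR
#   batch = processor.preprocess(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"]  # shape (1, 3, crop_size["height"], crop_size["width"])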
| 36 | 1 |
from __future__ import annotations
def lowercase ( __A : dict , __A : str ) -> set[str]:
'''simple docstring'''
snake_case , snake_case : str = set(__A ), [start]
while stack:
snake_case : str = stack.pop()
explored.add(__A )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__A )
return explored
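# With the example graph below and start vertex "A", the vertices are first
# visited in the order A, B, D, E, F, C, G, and the returned set contains all
# seven vertices.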
__lowercase : Union[str, Any] = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 36 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def lowercase ( __A : str , __A : str , **__A : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = AutoConfig.from_pretrained(__A , **__A )
snake_case : Tuple = AutoModelForSeqaSeqLM.from_config(__A )
model.save_pretrained(__A )
AutoTokenizer.from_pretrained(__A ).save_pretrained(__A )
return model
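# Invoked through fire below, e.g. (script name and save path are illustrative;
# extra flags are forwarded to AutoConfig.from_pretrained as overrides):
#   python save_randomly_initialized_version.py t5-small ./t5-small-random --d_model=64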
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 36 | 1 |
from __future__ import annotations
def lowercase ( __A : int ) -> list[int]:
'''simple docstring'''
snake_case : Dict = 2
snake_case : int = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__A )
if n > 1:
factors.append(__A )
return factors
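# Quick sanity check of the function above: for n = 360 it returns
# [2, 2, 2, 3, 3, 5], since 360 = 2**3 * 3**2 * 5.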
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = '''mobilenet_v1'''
def __init__( self ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=224 ,SCREAMING_SNAKE_CASE_=1.0 ,SCREAMING_SNAKE_CASE_=8 ,SCREAMING_SNAKE_CASE_="relu6" ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=0.9_99 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=0.0_01 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
snake_case : List[Any] = num_channels
snake_case : str = image_size
snake_case : List[Any] = depth_multiplier
snake_case : Optional[int] = min_depth
snake_case : Union[str, Any] = hidden_act
snake_case : int = tf_padding
snake_case : Optional[int] = classifier_dropout_prob
snake_case : Tuple = initializer_range
snake_case : List[str] = layer_norm_eps
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
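# Usage sketch (class names follow the deobfuscated transformers API):
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
#   onnx_config.outputs  # OrderedDict([("logits", {0: "batch"})])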
| 36 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : torch.FloatTensor
__lowerCamelCase : torch.FloatTensor
class _A ( snake_case , snake_case ):
'''simple docstring'''
__lowerCamelCase : List[str] = 1
@register_to_config
def __init__( self ,SCREAMING_SNAKE_CASE_ = 2000 ,SCREAMING_SNAKE_CASE_ = 0.15 ,SCREAMING_SNAKE_CASE_ = 0.01 ,SCREAMING_SNAKE_CASE_ = 13_48.0 ,SCREAMING_SNAKE_CASE_ = 1E-5 ,SCREAMING_SNAKE_CASE_ = 1 ,):
'''simple docstring'''
# standard deviation of the initial noise distribution
snake_case : Union[str, Any] = sigma_max
# setable values
snake_case : Any = None
self.set_sigmas(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
return sample
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
snake_case : str = torch.linspace(1 ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,device=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : List[Any] = sigma_min if sigma_min is not None else self.config.sigma_min
snake_case : str = sigma_max if sigma_max is not None else self.config.sigma_max
snake_case : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
snake_case : List[Any] = torch.exp(torch.linspace(math.log(SCREAMING_SNAKE_CASE_ ) ,math.log(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ) )
snake_case : Optional[Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return torch.where(
timesteps == 0 ,torch.zeros_like(t.to(timesteps.device ) ) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device ) ,)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
snake_case : Optional[Any] = timestep * torch.ones(
sample.shape[0] ,device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
snake_case : List[str] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
snake_case : Dict = timesteps.to(self.discrete_sigmas.device )
snake_case : Any = self.discrete_sigmas[timesteps].to(sample.device )
snake_case : str = self.get_adjacent_sigma(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ).to(sample.device )
snake_case : Tuple = torch.zeros_like(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
snake_case : Tuple = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
snake_case : Dict = diffusion.unsqueeze(-1 )
snake_case : List[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
snake_case : Tuple = randn_tensor(
sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE_ ,device=sample.device ,dtype=sample.dtype )
snake_case : Tuple = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
snake_case : List[str] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=SCREAMING_SNAKE_CASE_ ,prev_sample_mean=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
snake_case : Dict = randn_tensor(sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
snake_case : List[str] = torch.norm(model_output.reshape(model_output.shape[0] ,-1 ) ,dim=-1 ).mean()
snake_case : List[Any] = torch.norm(noise.reshape(noise.shape[0] ,-1 ) ,dim=-1 ).mean()
snake_case : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
snake_case : int = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
snake_case : Optional[int] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
snake_case : Union[str, Any] = step_size.unsqueeze(-1 )
snake_case : Optional[Any] = sample + step_size * model_output
snake_case : Any = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
snake_case : Optional[Any] = timesteps.to(original_samples.device )
snake_case : Any = self.discrete_sigmas.to(original_samples.device )[timesteps]
snake_case : Tuple = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(SCREAMING_SNAKE_CASE_ ) * sigmas[:, None, None, None]
)
snake_case : Optional[Any] = noise + original_samples
return noisy_samples
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
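# Sampling-loop sketch for the scheduler above (method names follow the
# deobfuscated diffusers API, step_pred / step_correct; `model` stands in for a
# score network and is illustrative):
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(model(sample, t), sample).prev_sample
#       out = scheduler.step_pred(model(sample, t), t, sample)
#       sample, sample_mean = out.prev_sample, out.prev_sample_mean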
| 36 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''decision_transformer'''
__lowerCamelCase : Optional[Any] = ['''past_key_values''']
__lowerCamelCase : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,SCREAMING_SNAKE_CASE_=17 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=1024 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_="relu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=1E-5 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Any = state_dim
snake_case : Optional[Any] = act_dim
snake_case : Union[str, Any] = hidden_size
snake_case : Any = max_ep_len
snake_case : int = action_tanh
snake_case : Any = vocab_size
snake_case : Any = n_positions
snake_case : List[str] = n_layer
snake_case : int = n_head
snake_case : Optional[int] = n_inner
snake_case : List[Any] = activation_function
snake_case : Tuple = resid_pdrop
snake_case : Optional[Any] = embd_pdrop
snake_case : Dict = attn_pdrop
snake_case : List[str] = layer_norm_epsilon
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[Any] = scale_attn_weights
snake_case : str = use_cache
snake_case : int = scale_attn_by_inverse_layer_idx
snake_case : Tuple = reorder_and_upcast_attn
snake_case : Tuple = bos_token_id
snake_case : List[str] = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
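# Usage sketch (class names follow the deobfuscated transformers API):
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#   model = DecisionTransformerModel(config)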
| 36 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = inspect.getfile(accelerate.test_utils )
snake_case : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
snake_case : Union[str, Any] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = F"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
snake_case : Dict = [sys.executable] + distributed_args
execute_subprocess_async(SCREAMING_SNAKE_CASE_ ,env=os.environ.copy() )
| 36 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 36 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : torch.FloatTensor
__lowerCamelCase : torch.FloatTensor
__lowerCamelCase : Optional[torch.FloatTensor] = None
class _A ( snake_case , snake_case ):
'''simple docstring'''
__lowerCamelCase : str = 2
@register_to_config
def __init__( self ,SCREAMING_SNAKE_CASE_ = 0.02 ,SCREAMING_SNAKE_CASE_ = 100 ,SCREAMING_SNAKE_CASE_ = 1.0_07 ,SCREAMING_SNAKE_CASE_ = 80 ,SCREAMING_SNAKE_CASE_ = 0.05 ,SCREAMING_SNAKE_CASE_ = 50 ,):
'''simple docstring'''
# standard deviation of the initial noise distribution
snake_case : Union[str, Any] = sigma_max
# setable values
snake_case : int = None
snake_case : np.IntTensor = None
snake_case : torch.FloatTensor = None # sigma(t_i)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
return sample
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Dict = num_inference_steps
snake_case : List[str] = np.arange(0 ,self.num_inference_steps )[::-1].copy()
snake_case : str = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
snake_case : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE_ ,dtype=torch.floataa ,device=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
snake_case : Any = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 )
else:
snake_case : Any = 0
# sample eps ~ N(0, S_noise^2 * I)
snake_case : Optional[int] = self.config.s_noise * randn_tensor(sample.shape ,generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
snake_case : Union[str, Any] = sigma + gamma * sigma
snake_case : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = True ,):
'''simple docstring'''
snake_case : Optional[int] = sample_hat + sigma_hat * model_output
snake_case : Optional[int] = (sample_hat - pred_original_sample) / sigma_hat
snake_case : Dict = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ ,derivative=SCREAMING_SNAKE_CASE_ ,pred_original_sample=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = True ,):
'''simple docstring'''
snake_case : int = sample_prev + sigma_prev * model_output
snake_case : str = (sample_prev - pred_original_sample) / sigma_prev
snake_case : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ ,derivative=SCREAMING_SNAKE_CASE_ ,pred_original_sample=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
raise NotImplementedError()
| 36 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str] ) -> Any:
'''simple docstring'''
snake_case : Tuple = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case : Optional[Any] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case : Optional[int] = f"""{src_lang}-{tgt_lang}"""
snake_case : Any = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(__A , exist_ok=__A )
snake_case : Union[str, Any] = os.path.join(__A , """README.md""" )
print(f"""Generating {path}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(__A )
# make sure we are under the root of the project
__lowercase : int = Path(__file__).resolve().parent.parent.parent
__lowercase : List[str] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowercase , __lowercase , __lowercase : List[str] = model_name.split('''-''')
__lowercase : str = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36 | 1 |
from __future__ import annotations
def lowercase ( __A : int , __A : int ) -> tuple[int, int]:
'''simple docstring'''
if b == 0:
return (1, 0)
((snake_case) , (snake_case)) : List[str] = extended_euclid(__A , a % b )
snake_case : List[str] = a // b
return (y, x - k * y)
def lowercase ( __A : int , __A : int , __A : int , __A : int ) -> int:
'''simple docstring'''
((snake_case) , (snake_case)) : str = extended_euclid(__A , __A )
snake_case : Tuple = na * na
snake_case : List[Any] = ra * x * na + ra * y * na
return (n % m + m) % m
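# Worked example for the function above (assuming the arguments are
# (n1, r1, n2, r2) as in the classical statement): with n1=5, r1=1, n2=7, r2=3,
# extended_euclid(5, 7) returns (x, y) = (3, -2) because 5*3 + 7*(-2) = 1, so
# n = 3*3*5 + 1*(-2)*7 = 31, and indeed 31 % 5 == 1 and 31 % 7 == 3.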
def lowercase ( __A : int , __A : int ) -> int:
'''simple docstring'''
((snake_case) , (snake_case)) : str = extended_euclid(__A , __A )
if b < 0:
snake_case : List[str] = (b % n + n) % n
return b
def lowercase ( __A : int , __A : int , __A : int , __A : int ) -> int:
'''simple docstring'''
snake_case , snake_case : Optional[Any] = invert_modulo(__A , __A ), invert_modulo(__A , __A )
snake_case : Tuple = na * na
snake_case : Optional[Any] = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 36 |
__lowercase : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowercase : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowercase : List[str] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 1 |
import sys
__lowercase : Union[str, Any] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def lowercase ( __A : str ) -> int:
'''simple docstring'''
snake_case : Optional[int] = 1
for digit in s:
product *= int(__A )
return product
def lowercase ( __A : str = N ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = -sys.maxsize - 1
snake_case : Optional[Any] = n[:13]
snake_case : Any = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
snake_case : List[str] = substr[1:] + n[cur_index]
cur_index += 1
else:
snake_case : Optional[Any] = max(__A , str_eval(__A ) )
snake_case : Optional[int] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
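# For the 1000-digit number above, the greatest product of thirteen adjacent
# digits is 23514624000 (Project Euler problem 8).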
if __name__ == "__main__":
print(f'''{solution() = }''')
| 36 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" ,FutureWarning ,)
super().__init__(args=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 36 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase : Optional[List[str]] = None
__lowercase : Tuple = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase : str = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : bool = True
__lowerCamelCase : Optional[str] = None
# Automatically constructed
__lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
__lowerCamelCase : ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
__lowerCamelCase : str = field(default='''Image''' , init=snake_case , repr=snake_case )
def __call__( self ):
'''simple docstring'''
return self.pa_type
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Dict = np.array(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
return {"path": value, "bytes": None}
elif isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
return {"path": None, "bytes": value}
elif isinstance(SCREAMING_SNAKE_CASE_ ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(SCREAMING_SNAKE_CASE_ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
snake_case : str = {}
snake_case , snake_case : Optional[int] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(SCREAMING_SNAKE_CASE_ ):
snake_case : Optional[int] = PIL.Image.open(SCREAMING_SNAKE_CASE_ )
else:
snake_case : List[str] = path.split("""::""" )[-1]
try:
snake_case : Tuple = string_to_dict(SCREAMING_SNAKE_CASE_ ,config.HUB_DATASETS_URL )["""repo_id"""]
snake_case : List[str] = token_per_repo_id.get(SCREAMING_SNAKE_CASE_ )
except ValueError:
snake_case : Optional[int] = None
with xopen(SCREAMING_SNAKE_CASE_ ,"""rb""" ,use_auth_token=SCREAMING_SNAKE_CASE_ ) as f:
snake_case : int = BytesIO(f.read() )
snake_case : Union[str, Any] = PIL.Image.open(bytes_ )
else:
snake_case : Optional[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case_ ( self ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if pa.types.is_string(storage.type ):
snake_case : List[Any] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) ,type=pa.binary() )
snake_case : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
snake_case : Union[str, Any] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) ,type=pa.string() )
snake_case : Tuple = pa.StructArray.from_arrays([storage, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
snake_case : Any = storage.field("""bytes""" )
else:
snake_case : Dict = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) ,type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
snake_case : Union[str, Any] = storage.field("""path""" )
else:
snake_case : List[str] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) ,type=pa.string() )
snake_case : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
snake_case : List[str] = pa.array(
[encode_np_array(np.array(SCREAMING_SNAKE_CASE_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
snake_case : Tuple = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) ,type=pa.string() )
snake_case : List[str] = pa.StructArray.from_arrays(
[bytes_array, path_array] ,["""bytes""", """path"""] ,mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_ ,self.pa_type )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(SCREAMING_SNAKE_CASE_ ):
with xopen(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as f:
snake_case : Optional[int] = f.read()
return bytes_
snake_case : Union[str, Any] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
snake_case : Any = pa.array(
[os.path.basename(SCREAMING_SNAKE_CASE_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] ,type=pa.string() ,)
snake_case : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_ ,self.pa_type )
def lowercase ( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
snake_case : Any = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowercase ( __A : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
snake_case : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
snake_case : Tuple = image.format
else:
snake_case : List[Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__A , format=__A )
return buffer.getvalue()
def lowercase ( __A : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(__A , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def lowercase ( __A : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
snake_case : List[Any] = array.dtype
snake_case : Dict = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
snake_case : str = dtype.kind
snake_case : Optional[int] = dtype.itemsize
snake_case : Union[str, Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
snake_case : str = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
snake_case : Tuple = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
snake_case : Any = dtype_byteorder + dtype_kind + str(dtype_itemsize )
snake_case : Dict = np.dtype(__A )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
snake_case : Optional[Any] = PIL.Image.fromarray(array.astype(__A ) )
return {"path": None, "bytes": image_to_bytes(__A )}
def lowercase ( __A : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
snake_case , snake_case : Optional[int] = first_non_null_value(__A )
if isinstance(__A , __A ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__A , np.ndarray ):
snake_case : List[Any] = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
elif isinstance(__A , PIL.Image.Image ):
snake_case : Tuple = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
else:
return objs
else:
return objs
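# Usage sketch (the feature above is datasets.Image; file names are
# illustrative):
#   from datasets import Dataset, Image
#   ds = Dataset.from_dict({"image": ["cat.png"]}).cast_column("image", Image())
#   ds[0]["image"]  # lazily decoded to a PIL.Image.Image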
| 36 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowercase : List[str] = ['''text''', '''image''', '''audio''']
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__A , __A ):
inputs.append(create_inputs(__A ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def lowercase ( __A : List ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = []
for output in outputs:
if isinstance(__A , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__A , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__A , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _A :
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
snake_case : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input ,SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : str = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : Union[str, Any] = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) ,self.tool.outputs )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.outputs ):
snake_case : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
| 36 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : List[Any] = logging.get_logger(__name__)
__lowercase : Tuple = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[str] = '''xlm'''
__lowerCamelCase : Any = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self ,SCREAMING_SNAKE_CASE_=30145 ,SCREAMING_SNAKE_CASE_=2048 ,SCREAMING_SNAKE_CASE_=12 ,SCREAMING_SNAKE_CASE_=16 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=2048**-0.5 ,SCREAMING_SNAKE_CASE_=1E-12 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=5 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_="first" ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=5 ,SCREAMING_SNAKE_CASE_=5 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Optional[int] = vocab_size
snake_case : Optional[int] = emb_dim
snake_case : Optional[Any] = n_layers
snake_case : Dict = n_heads
snake_case : Union[str, Any] = dropout
snake_case : Dict = attention_dropout
snake_case : Tuple = gelu_activation
snake_case : Tuple = sinusoidal_embeddings
snake_case : Optional[Any] = causal
snake_case : int = asm
snake_case : Any = n_langs
snake_case : str = use_lang_emb
snake_case : Optional[int] = layer_norm_eps
snake_case : Optional[Any] = bos_index
snake_case : List[str] = eos_index
snake_case : Optional[int] = pad_index
snake_case : Optional[Any] = unk_index
snake_case : Union[str, Any] = mask_index
snake_case : Any = is_encoder
snake_case : Dict = max_position_embeddings
snake_case : Optional[int] = embed_init_std
snake_case : Dict = init_std
snake_case : List[Any] = summary_type
snake_case : List[Any] = summary_use_proj
snake_case : List[str] = summary_activation
snake_case : Dict = summary_proj_to_labels
snake_case : Union[str, Any] = summary_first_dropout
snake_case : str = start_n_top
snake_case : Tuple = end_n_top
snake_case : str = mask_token_id
snake_case : Tuple = lang_id
if "n_words" in kwargs:
snake_case : Dict = kwargs["""n_words"""]
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ ,bos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : Any = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowercase : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase ( __A : Optional[Any] , __A : Optional[Any] ) -> str:
'''simple docstring'''
inspect_dataset(__A , __A )
snake_case : List[str] = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase ( __A : Optional[int] , __A : Any ) -> Optional[Any]:
'''simple docstring'''
inspect_metric(__A , __A )
snake_case : Any = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Tuple , __A : Dict , __A : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = get_dataset_config_info(__A , config_name=__A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Tuple , __A : Any , __A : List[str] ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_config_info(__A , config_name=__A )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase ( __A : Any , __A : Dict ) -> Dict:
'''simple docstring'''
snake_case : int = get_dataset_config_names(__A )
assert expected in config_names
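# Sketch of the call under test: get_dataset_config_names("squad") is expected
# to include "plain_text", and a dataset without named configs falls back to
# the "default" name, per the parametrized cases above.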
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[Any] , __A : Dict , __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[Any] = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
snake_case : Any = expected_configs[0]
assert expected_config in infos
snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[int] , __A : Tuple , __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = get_dataset_infos(__A )
assert expected_config in infos
snake_case : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Optional[int] , __A : Any , __A : Dict ) -> int:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
| 36 | 1 |
def lowercase ( __A : int , __A : int ) -> Optional[int]:
'''simple docstring'''
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(__A , int(b / 2 ) ) * actual_power(__A , int(b / 2 ) )
else:
return a * actual_power(__A , int(b / 2 ) ) * actual_power(__A , int(b / 2 ) )
def lowercase ( __A : int , __A : int ) -> float:
'''simple docstring'''
if b < 0:
return 1 / actual_power(__A , __A )
return actual_power(__A , __A )
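# Worked example: int(b / 2) truncates toward zero, so the recursion in
# actual_power also terminates for negative exponents, e.g.
#   actual_power(-2, -3) == -8,  hence power(-2, -3) == 1 / -8 == -0.125
# (this is why the b < 0 branch can reuse actual_power without negating b).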
if __name__ == "__main__":
print(power(-2, -3))
| 36 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase : Optional[Any] = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''albert'''
def __init__( self ,SCREAMING_SNAKE_CASE_=30000 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=12 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=64 ,SCREAMING_SNAKE_CASE_=16384 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_="gelu_new" ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=1E-12 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_="absolute" ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ ,bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = vocab_size
snake_case : int = embedding_size
snake_case : int = hidden_size
snake_case : List[Any] = num_hidden_layers
snake_case : int = num_hidden_groups
snake_case : List[str] = num_attention_heads
snake_case : List[str] = inner_group_num
snake_case : Any = hidden_act
snake_case : Any = intermediate_size
snake_case : Union[str, Any] = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Tuple = max_position_embeddings
snake_case : Any = type_vocab_size
snake_case : Optional[Any] = initializer_range
snake_case : int = layer_norm_eps
snake_case : Optional[int] = classifier_dropout_prob
snake_case : str = position_embedding_type
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase ( __A : Any , __A : bool = True , __A : float = math.inf , __A : float = -math.inf , __A : float = math.inf , __A : float = -math.inf , __A : bool = False , __A : float = 100 , __A : float = 0.01 , __A : float = 1 , ) -> Any:
'''simple docstring'''
snake_case : Tuple = False
snake_case : List[Any] = search_prob
snake_case : Union[str, Any] = start_temperate
snake_case : Optional[Any] = []
snake_case : Optional[int] = 0
snake_case : Any = None
while not search_end:
snake_case : Tuple = current_state.score()
if best_state is None or current_score > best_state.score():
snake_case : str = current_state
scores.append(__A )
iterations += 1
snake_case : Optional[Any] = None
snake_case : List[Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
snake_case : str = random.randint(0 , len(__A ) - 1 ) # picking a random neighbor
snake_case : Optional[Any] = neighbors.pop(__A )
snake_case : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
snake_case : List[str] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
snake_case : Dict = picked_neighbor
else:
snake_case : Tuple = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
snake_case : Union[str, Any] = picked_neighbor
snake_case : Optional[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
snake_case : Tuple = True
else:
snake_case : Tuple = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__A ) , __A )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
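# Sketch of the acceptance rule used above (Metropolis criterion): a worse
# neighbor with change = -2 at current_temp = 10 is still accepted with
# probability math.e ** (-2 / 10) ~= 0.819, and that chance shrinks as the
# temperature decays by rate_of_decrease each iteration.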
if __name__ == "__main__":
def lowercase ( __A : int , __A : int ) -> Union[str, Any]:
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__lowercase : Union[str, Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowercase : List[Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
__lowercase : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowercase : str = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def lowercase ( __A : str , __A : Any ) -> Any:
'''simple docstring'''
return (3 * x**2) - (6 * y)
__lowercase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowercase : Optional[int] = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
f'''{local_min.score()}'''
)
__lowercase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowercase : List[str] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
f'''{local_min.score()}'''
)
| 36 |
from __future__ import annotations
def lowercase ( __A : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(__A ) / len(__A )
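# e.g. a list [3, 6, 9] yields 18 / 3 == 6.0, while an empty list raises the
# ValueError above instead of dividing by zero.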
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : Dict = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowercase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 36 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Tuple = '''vision-encoder-decoder'''
__lowerCamelCase : List[Any] = True
def __init__( self ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
snake_case : Union[str, Any] = kwargs.pop("""encoder""" )
snake_case : Any = encoder_config.pop("""model_type""" )
snake_case : Optional[Any] = kwargs.pop("""decoder""" )
snake_case : Union[str, Any] = decoder_config.pop("""model_type""" )
snake_case : Any = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : int = True
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
snake_case : Tuple = True
snake_case : Union[str, Any] = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**SCREAMING_SNAKE_CASE_ )
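# Usage sketch: pairing, say, a ViT encoder config with a GPT-2 decoder config
# goes through this classmethod, which forces the decoder into cross-attention
# mode (is_decoder=True, add_cross_attention=True) before the combined config
# is built from the two dicts.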
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ )
snake_case : Union[str, Any] = self.encoder.to_dict()
snake_case : Union[str, Any] = self.decoder.to_dict()
snake_case : Dict = self.__class__.model_type
return output
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = OrderedDict()
snake_case : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Optional[Any] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
import torch
snake_case : Optional[Any] = OrderedDict()
snake_case : Tuple = super().generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,seq_length=SCREAMING_SNAKE_CASE_ ,is_pair=SCREAMING_SNAKE_CASE_ ,framework=SCREAMING_SNAKE_CASE_ )
snake_case , snake_case : List[Any] = dummy_input["""input_ids"""].shape
snake_case : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
snake_case : List[str] = dummy_input.pop("""input_ids""" )
snake_case : int = dummy_input.pop("""attention_mask""" )
snake_case : Dict = torch.zeros(SCREAMING_SNAKE_CASE_ )
return common_inputs
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "default" ):
'''simple docstring'''
snake_case : int = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
| 36 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__lowercase : Optional[Any] = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
__lowercase : Optional[int] = None
def lowercase ( ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowercase ( __A : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case : Any = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : int = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def lowercase ( __A : int ) -> Optional[int]:
'''simple docstring'''
def remove_articles(__A : List[Any] ):
return ARTICLES_REGEX.sub(""" """ , __A )
def white_space_fix(__A : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(__A : Tuple ):
snake_case : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__A : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def lowercase ( __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if not s:
return []
return normalize_answer(__A ).split()
def lowercase ( __A : Optional[int] , __A : int ) -> List[Any]:
'''simple docstring'''
return int(normalize_answer(__A ) == normalize_answer(__A ) )
def lowercase ( __A : Any , __A : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = get_tokens(__A )
snake_case : str = get_tokens(__A )
snake_case : Dict = collections.Counter(__A ) & collections.Counter(__A )
snake_case : Optional[int] = sum(common.values() )
if len(__A ) == 0 or len(__A ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
snake_case : List[Any] = 1.0 * num_same / len(__A )
snake_case : int = 1.0 * num_same / len(__A )
snake_case : Dict = (2 * precision * recall) / (precision + recall)
return fa
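# Worked example: for gold "blue dog" and prediction "blue cat", the token
# bags share only {"blue"}, so precision == recall == 0.5 and
# F1 == 2 * 0.5 * 0.5 / (0.5 + 0.5) == 0.5.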
def lowercase ( __A : List[Any] , __A : int ) -> str:
'''simple docstring'''
snake_case : Tuple = {}
snake_case : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : str = qa["""id"""]
snake_case : Union[str, Any] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__A )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
snake_case : Optional[Any] = [""""""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
snake_case : Dict = preds[qid]
# Take max over all gold answers
snake_case : Union[str, Any] = max(compute_exact(__A , __A ) for a in gold_answers )
snake_case : Optional[int] = max(compute_fa(__A , __A ) for a in gold_answers )
return exact_scores, fa_scores
def lowercase ( __A : str , __A : Any , __A : List[Any] , __A : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = {}
for qid, s in scores.items():
snake_case : Any = na_probs[qid] > na_prob_thresh
if pred_na:
snake_case : str = float(not qid_to_has_ans[qid] )
else:
snake_case : List[Any] = s
return new_scores
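# Sketch: with na_prob_thresh = 0.5, a question whose no-answer probability is
# 0.9 is predicted as unanswerable, so its score becomes 1.0 if the gold
# annotation is also no-answer and 0.0 otherwise; below the threshold the raw
# exact/F1 score is kept.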
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str]=None ) -> int:
'''simple docstring'''
if not qid_list:
snake_case : List[str] = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
snake_case : Any = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def lowercase ( __A : Optional[Any] , __A : Tuple , __A : List[str] ) -> Optional[Any]:
'''simple docstring'''
for k in new_eval:
snake_case : str = new_eval[k]
def lowercase ( __A : Tuple , __A : int , __A : Dict , __A : Dict ) -> int:
'''simple docstring'''
plt.step(__A , __A , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__A , __A , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__A )
plt.savefig(__A )
plt.clf()
def lowercase ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Tuple , __A : Optional[Any]=None , __A : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = sorted(__A , key=lambda __A : na_probs[k] )
snake_case : Any = 0.0
snake_case : str = 1.0
snake_case : Tuple = 0.0
snake_case : str = [1.0]
snake_case : Any = [0.0]
snake_case : Dict = 0.0
for i, qid in enumerate(__A ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
snake_case : str = true_pos / float(i + 1 )
snake_case : List[str] = true_pos / float(__A )
if i == len(__A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__A )
recalls.append(__A )
if out_image:
plot_pr_curve(__A , __A , __A , __A )
return {"ap": 100.0 * avg_prec}
def lowercase ( __A : Any , __A : Optional[int] , __A : Tuple , __A : Tuple , __A : List[Any] , __A : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if out_image_dir and not os.path.exists(__A ):
os.makedirs(__A )
snake_case : Tuple = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
snake_case : str = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
snake_case : Dict = {k: float(__A ) for k, v in qid_to_has_ans.items()}
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__A , __A , """pr_exact""" )
merge_eval(__A , __A , """pr_f1""" )
merge_eval(__A , __A , """pr_oracle""" )
def lowercase ( __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not qid_list:
return
snake_case : int = [na_probs[k] for k in qid_list]
snake_case : List[str] = np.ones_like(__A ) / float(len(__A ) )
plt.hist(__A , weights=__A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__A , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def lowercase ( __A : List[Any] , __A : Tuple , __A : Tuple , __A : Any ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
snake_case : str = num_no_ans
snake_case : Optional[Any] = cur_score
snake_case : Optional[Any] = 0.0
snake_case : List[Any] = sorted(__A , key=lambda __A : na_probs[k] )
for i, qid in enumerate(__A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
snake_case : Dict = scores[qid]
else:
if preds[qid]:
snake_case : Dict = -1
else:
snake_case : str = 0
cur_score += diff
if cur_score > best_score:
snake_case : Union[str, Any] = cur_score
snake_case : List[Any] = na_probs[qid]
return 100.0 * best_score / len(__A ), best_thresh
def lowercase ( __A : Dict , __A : str , __A : str , __A : int , __A : str , __A : Any ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : Optional[int] = find_best_thresh(__A , __A , __A , __A )
snake_case , snake_case : str = find_best_thresh(__A , __A , __A , __A )
snake_case : List[str] = best_exact
snake_case : List[Any] = exact_thresh
snake_case : Optional[Any] = best_fa
snake_case : Optional[int] = fa_thresh
def lowercase ( ) -> Any:
'''simple docstring'''
with open(OPTS.data_file ) as f:
snake_case : Dict = json.load(__A )
snake_case : Union[str, Any] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
snake_case : int = json.load(__A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
snake_case : Any = json.load(__A )
else:
snake_case : Any = {k: 0.0 for k in preds}
snake_case : Optional[int] = make_qid_to_has_ans(__A ) # maps qid to True/False
snake_case : Dict = [k for k, v in qid_to_has_ans.items() if v]
snake_case : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
snake_case , snake_case : Optional[Any] = get_raw_scores(__A , __A )
snake_case : Tuple = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[Any] = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[int] = make_eval_dict(__A , __A )
if has_ans_qids:
snake_case : Any = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """HasAns""" )
if no_ans_qids:
snake_case : str = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__A , __A , __A , __A , __A , __A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__A , __A , __A , __A , __A , OPTS.out_image_dir )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__A , __A )
else:
print(json.dumps(__A , indent=2 ) )
if __name__ == "__main__":
__lowercase : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 36 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def lowercase ( __A : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = """huggingface/label-files"""
snake_case : int = """imagenet-1k-id2label.json"""
snake_case : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
snake_case : Any = {int(__A ): v for k, v in idalabel.items()}
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : Any = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case : List[Any] = BitConfig(
conv_layer=__A , num_labels=1000 , idalabel=__A , labelaid=__A , )
return config
def lowercase ( __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "stem.conv" in name:
snake_case : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
snake_case : List[str] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
snake_case : Optional[int] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
snake_case : Optional[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
snake_case : Tuple = """bit.encoder.""" + name
return name
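# Worked examples of the timm -> HF key mapping above:
#   "stem.conv.weight"      -> "bit.embedder.convolution.weight"
#   "head.fc.bias"          -> "classifier.1.bias"
#   "blocks.0.conv1.weight" -> "bit.encoder.layers.0.conv1.weight"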
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowercase ( __A : Any , __A : Union[str, Any] , __A : str=False ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_config(__A )
# load original model from timm
snake_case : Tuple = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
snake_case : List[str] = timm_model.state_dict()
for key in state_dict.copy().keys():
snake_case : List[Any] = state_dict.pop(__A )
snake_case : Union[str, Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
snake_case : List[Any] = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
snake_case : Dict = create_transform(**resolve_data_config({} , model=__A ) )
snake_case : Optional[Any] = transform.transforms
snake_case : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case : Union[str, Any] = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case : Dict = prepare_img()
snake_case : List[str] = transform(__A ).unsqueeze(0 )
snake_case : int = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
snake_case : Optional[int] = model(__A )
snake_case : Dict = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case : int = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : torch.FloatTensor
class _A ( snake_case , snake_case ):
'''simple docstring'''
@register_to_config
def __init__( self ,SCREAMING_SNAKE_CASE_ = 65536 ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = 2 ,SCREAMING_SNAKE_CASE_ = 2 ,SCREAMING_SNAKE_CASE_ = 0 ,SCREAMING_SNAKE_CASE_ = "fourier" ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = 0.0 ,SCREAMING_SNAKE_CASE_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,SCREAMING_SNAKE_CASE_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,SCREAMING_SNAKE_CASE_ = "UNetMidBlock1D" ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = (32, 32, 64) ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = 8 ,SCREAMING_SNAKE_CASE_ = 1 ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
super().__init__()
snake_case : Tuple = sample_size
# time
if time_embedding_type == "fourier":
snake_case : str = GaussianFourierProjection(
embedding_size=8 ,set_W_to_weight=SCREAMING_SNAKE_CASE_ ,log=SCREAMING_SNAKE_CASE_ ,flip_sin_to_cos=SCREAMING_SNAKE_CASE_ )
snake_case : int = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case : List[str] = Timesteps(
block_out_channels[0] ,flip_sin_to_cos=SCREAMING_SNAKE_CASE_ ,downscale_freq_shift=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = block_out_channels[0]
if use_timestep_embedding:
snake_case : Optional[int] = block_out_channels[0] * 4
snake_case : Any = TimestepEmbedding(
in_channels=SCREAMING_SNAKE_CASE_ ,time_embed_dim=SCREAMING_SNAKE_CASE_ ,act_fn=SCREAMING_SNAKE_CASE_ ,out_dim=block_out_channels[0] ,)
snake_case : List[str] = nn.ModuleList([] )
snake_case : Union[str, Any] = None
snake_case : List[str] = nn.ModuleList([] )
snake_case : Optional[Any] = None
# down
snake_case : List[str] = in_channels
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ):
snake_case : str = output_channel
snake_case : Union[str, Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case : Optional[int] = i == len(SCREAMING_SNAKE_CASE_ ) - 1
snake_case : str = get_down_block(
SCREAMING_SNAKE_CASE_ ,num_layers=SCREAMING_SNAKE_CASE_ ,in_channels=SCREAMING_SNAKE_CASE_ ,out_channels=SCREAMING_SNAKE_CASE_ ,temb_channels=block_out_channels[0] ,add_downsample=not is_final_block or downsample_each_block ,)
self.down_blocks.append(SCREAMING_SNAKE_CASE_ )
# mid
snake_case : List[str] = get_mid_block(
SCREAMING_SNAKE_CASE_ ,in_channels=block_out_channels[-1] ,mid_channels=block_out_channels[-1] ,out_channels=block_out_channels[-1] ,embed_dim=block_out_channels[0] ,num_layers=SCREAMING_SNAKE_CASE_ ,add_downsample=SCREAMING_SNAKE_CASE_ ,)
# up
snake_case : Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) )
snake_case : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case : Union[str, Any] = out_channels
else:
snake_case : Any = block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ):
snake_case : List[str] = output_channel
snake_case : Optional[int] = (
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels
)
snake_case : List[str] = i == len(SCREAMING_SNAKE_CASE_ ) - 1
snake_case : int = get_up_block(
SCREAMING_SNAKE_CASE_ ,num_layers=SCREAMING_SNAKE_CASE_ ,in_channels=SCREAMING_SNAKE_CASE_ ,out_channels=SCREAMING_SNAKE_CASE_ ,temb_channels=block_out_channels[0] ,add_upsample=not is_final_block ,)
self.up_blocks.append(SCREAMING_SNAKE_CASE_ )
snake_case : str = output_channel
# out
snake_case : Tuple = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 ,32 )
snake_case : Tuple = get_out_block(
out_block_type=SCREAMING_SNAKE_CASE_ ,num_groups_out=SCREAMING_SNAKE_CASE_ ,embed_dim=block_out_channels[0] ,out_channels=SCREAMING_SNAKE_CASE_ ,act_fn=SCREAMING_SNAKE_CASE_ ,fc_dim=block_out_channels[-1] // 4 ,)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = True ,):
'''simple docstring'''
snake_case : Union[str, Any] = timestep
if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ):
snake_case : Dict = torch.tensor([timesteps] ,dtype=torch.long ,device=sample.device )
elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0:
snake_case : Any = timesteps[None].to(sample.device )
snake_case : Optional[int] = self.time_proj(SCREAMING_SNAKE_CASE_ )
if self.config.use_timestep_embedding:
snake_case : Optional[Any] = self.time_mlp(SCREAMING_SNAKE_CASE_ )
else:
snake_case : Optional[int] = timestep_embed[..., None]
snake_case : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case : Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case : Any = ()
for downsample_block in self.down_blocks:
snake_case , snake_case : Optional[int] = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ ,temb=SCREAMING_SNAKE_CASE_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case : List[Any] = self.mid_block(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case : str = down_block_res_samples[-1:]
snake_case : int = down_block_res_samples[:-1]
snake_case : int = upsample_block(SCREAMING_SNAKE_CASE_ ,res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ ,temb=SCREAMING_SNAKE_CASE_ )
# 5. post-process
if self.out_block:
snake_case : Any = self.out_block(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
| 36 |
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1''' # defaults region
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : Dict = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
__lowerCamelCase : Optional[Any] = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
__lowerCamelCase : List[str] = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 36 | 1 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Optional[int] = logging.get_logger(__name__)
set_seed(770)
__lowercase : Dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
__lowercase : int = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
__lowercase : List[str] = os.path.dirname(os.path.abspath(__file__))
__lowercase : List[Any] = os.path.join(os.path.expanduser('''~'''), '''.cache''')
__lowercase : Any = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def lowercase ( __A : Union[str, Any] , __A : int=False ) -> str:
'''simple docstring'''
snake_case : Optional[Any] = model_type
if use_small:
key += "_small"
return os.path.join(__A , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def lowercase ( __A : Any , __A : int ) -> Any:
'''simple docstring'''
os.makedirs(__A , exist_ok=__A )
hf_hub_download(repo_id=__A , filename=__A , local_dir=__A )
def lowercase ( __A : Any , __A : Any , __A : Dict=False , __A : Any="text" ) -> Optional[Any]:
'''simple docstring'''
if model_type == "text":
snake_case : Dict = BarkSemanticModel
snake_case : Optional[Any] = BarkSemanticConfig
snake_case : Dict = BarkSemanticGenerationConfig
elif model_type == "coarse":
snake_case : Optional[Any] = BarkCoarseModel
snake_case : Any = BarkCoarseConfig
snake_case : List[Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
snake_case : Dict = BarkFineModel
snake_case : Any = BarkFineConfig
snake_case : Any = BarkFineGenerationConfig
else:
raise NotImplementedError()
snake_case : Tuple = f"""{model_type}_small""" if use_small else model_type
snake_case : List[Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__A ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
snake_case : List[str] = torch.load(__A , map_location=__A )
# this is a hack
snake_case : Optional[Any] = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
snake_case : Tuple = model_args["""vocab_size"""]
snake_case : Tuple = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
snake_case : Any = model_args.pop("""n_head""" )
snake_case : int = model_args.pop("""n_embd""" )
snake_case : List[Any] = model_args.pop("""n_layer""" )
snake_case : List[Any] = ConfigClass(**checkpoint["""model_args"""] )
snake_case : Tuple = ModelClass(config=__A )
snake_case : str = GenerationConfigClass()
snake_case : Optional[int] = model_generation_config
snake_case : Optional[int] = checkpoint["""model"""]
# fixup checkpoint
snake_case : str = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(__A ):
# replace part of the key with corresponding layer name in HF implementation
snake_case : Dict = k[len(__A ) :]
for old_layer_name in new_layer_name_dict:
snake_case : Optional[Any] = new_k.replace(__A , new_layer_name_dict[old_layer_name] )
snake_case : Union[str, Any] = state_dict.pop(__A )
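# Worked example of the key fixup above, given the insertion order of
# new_layer_name_dict:
#   "_orig_mod.transformer.h.0.ln_1.weight" -> "layers.0.layernorm_1.weight"
# (the "_orig_mod." prefix is stripped first, then each substring is replaced
# in dict order, which Python 3.7+ preserves).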
snake_case : str = set(state_dict.keys() ) - set(model.state_dict().keys() )
snake_case : Optional[int] = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
snake_case : Optional[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
snake_case : List[str] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(__A ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(__A ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(__A , strict=__A )
snake_case : Union[str, Any] = model.num_parameters(exclude_embeddings=__A )
snake_case : int = checkpoint["""best_val_loss"""].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(__A , 3 )} loss""" )
model.eval()
model.to(__A )
del checkpoint, state_dict
return model
def lowercase ( __A : Optional[int] , __A : Dict=False , __A : Any="text" ) -> str:
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
snake_case : List[str] = """cpu""" # do conversion on cpu
snake_case : Dict = _get_ckpt_path(__A , use_small=__A )
snake_case : Tuple = _load_model(__A , __A , model_type=__A , use_small=__A )
# load bark initial model
snake_case : int = _bark_load_model(__A , """cpu""" , model_type=__A , use_small=__A )
if model_type == "text":
snake_case : Union[str, Any] = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=__A ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
snake_case : str = 5
snake_case : Dict = 10
if model_type in ["text", "coarse"]:
snake_case : Optional[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
snake_case : Optional[int] = bark_model(__A )[0]
snake_case : Optional[Any] = model(__A )
# take last logits
snake_case : Optional[int] = output_new_model_total.logits[:, [-1], :]
else:
snake_case : int = 3
snake_case : Tuple = 8
snake_case : int = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
snake_case : List[Any] = model(__A , __A )
snake_case : Optional[Any] = bark_model(__A , __A )
snake_case : str = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
def lowercase ( __A : List[str] , __A : List[Any] , __A : Optional[int] , __A : Optional[Any] , __A : List[Any] , __A : str , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = os.path.join(__A , __A )
snake_case : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(__A , """config.json""" ) )
snake_case : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(__A , """config.json""" ) )
snake_case : str = BarkFineConfig.from_pretrained(os.path.join(__A , """config.json""" ) )
snake_case : List[Any] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
snake_case : List[str] = BarkSemanticModel.from_pretrained(__A )
snake_case : List[Any] = BarkCoarseModel.from_pretrained(__A )
snake_case : str = BarkFineModel.from_pretrained(__A )
snake_case : Dict = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
snake_case : Tuple = BarkConfig.from_sub_model_configs(
__A , __A , __A , __A )
snake_case : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
snake_case : str = BarkModel(__A )
snake_case : int = semantic
snake_case : Optional[Any] = coarseAcoustic
snake_case : Dict = fineAcoustic
snake_case : int = codec
snake_case : Dict = bark_generation_config
Path(__A ).mkdir(exist_ok=__A )
bark.save_pretrained(__A , repo_id=__A , push_to_hub=__A )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
__lowercase : Dict = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 36 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''spm_char.model'''}
__lowercase : Tuple = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
__lowercase : Optional[int] = {
'''microsoft/speecht5_asr''': 1_024,
'''microsoft/speecht5_tts''': 1_024,
'''microsoft/speecht5_vc''': 1_024,
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[str] = VOCAB_FILES_NAMES
__lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_="<s>" ,SCREAMING_SNAKE_CASE_="</s>" ,SCREAMING_SNAKE_CASE_="<unk>" ,SCREAMING_SNAKE_CASE_="<pad>" ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ ,eos_token=SCREAMING_SNAKE_CASE_ ,unk_token=SCREAMING_SNAKE_CASE_ ,pad_token=SCREAMING_SNAKE_CASE_ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE_ ,)
snake_case : Optional[Any] = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
snake_case : Dict = self.__dict__.copy()
snake_case : Dict = None
return state
def __setstate__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
snake_case : Optional[int] = {}
snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ ,out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Optional[Any] = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
return token
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Any = []
snake_case : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
snake_case : Any = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string.strip()
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ ,token_ids_a=SCREAMING_SNAKE_CASE_ ,already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = [1]
if token_ids_a is None:
return ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones
return ([0] * len(SCREAMING_SNAKE_CASE_ )) + ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Tuple = os.path.join(
SCREAMING_SNAKE_CASE_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
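# A pure-Python sketch of the detokenization loop in the class above: special
# tokens bypass the subword decoder and are appended verbatim, while ordinary
# tokens are buffered and decoded together (here the "decoder" is just a join;
# the special-token set is illustrative).
def _join_with_specials(tokens, specials=("</s>",)):
    out, buf = "", []
    for tok in tokens:
        if tok in specials:
            out += "".join(buf) + tok  # flush buffered pieces, keep special as-is
            buf = []
        else:
            buf.append(tok)
    return (out + "".join(buf)).strip()

assert _join_with_specials(["h", "i", "</s>", "!"]) == "hi</s>!"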
| 36 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowercase : Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : List[int]
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[Union[int, float]] = None
__lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : str = hans_processors[task]()
snake_case : str = os.path.join(
SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
snake_case : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : List[Any] = label_list[2], label_list[1]
snake_case : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case : Any = cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
snake_case : Union[str, Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
)
logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : Any = hans_processors[task]()
snake_case : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : int = label_list[2], label_list[1]
snake_case : List[str] = label_list
snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case : Any = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE_ ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case_ ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
class _A ( snake_case ):
'''simple docstring'''
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" )
def snake_case_ ( self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = []
for i, line in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
continue
snake_case : Any = """%s-%s""" % (set_type, line[0])
snake_case : Optional[int] = line[5]
snake_case : Union[str, Any] = line[6]
snake_case : Optional[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
snake_case : Dict = line[0]
examples.append(InputExample(guid=SCREAMING_SNAKE_CASE_ ,text_a=SCREAMING_SNAKE_CASE_ ,text_b=SCREAMING_SNAKE_CASE_ ,label=SCREAMING_SNAKE_CASE_ ,pairID=SCREAMING_SNAKE_CASE_ ) )
return examples
def lowercase ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = {label: i for i, label in enumerate(__A )}
snake_case : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
snake_case : Union[str, Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="""max_length""" , truncation=__A , return_overflowing_tokens=__A , )
snake_case : Tuple = label_map[example.label] if example.label in label_map else 0
snake_case : Tuple = int(example.pairID )
features.append(InputFeatures(**__A , label=__A , pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__lowercase : Dict = {
'''hans''': 3,
}
__lowercase : Union[str, Any] = {
'''hans''': HansProcessor,
}
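# A small sketch of the label handling in hans_convert_examples_to_features
# above: labels map to indices via enumerate, and labels missing from the map
# (HANS itself uses a two-way non-entailment label) fall back to index 0.
_label_list = ["contradiction", "entailment", "neutral"]
_label_map = {label: i for i, label in enumerate(_label_list)}
_label = "non-entailment"
print(_label_map[_label] if _label in _label_map else 0)  # 0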
| 36 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''decision_transformer'''
__lowerCamelCase : Optional[Any] = ['''past_key_values''']
__lowerCamelCase : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,SCREAMING_SNAKE_CASE_=17 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=1024 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_="relu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=1E-5 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Any = state_dim
snake_case : Optional[Any] = act_dim
snake_case : Union[str, Any] = hidden_size
snake_case : Any = max_ep_len
snake_case : int = action_tanh
snake_case : Any = vocab_size
snake_case : Any = n_positions
snake_case : List[str] = n_layer
snake_case : int = n_head
snake_case : Optional[int] = n_inner
snake_case : List[Any] = activation_function
snake_case : Tuple = resid_pdrop
snake_case : Optional[Any] = embd_pdrop
snake_case : Dict = attn_pdrop
snake_case : List[str] = layer_norm_epsilon
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[Any] = scale_attn_weights
snake_case : str = use_cache
snake_case : int = scale_attn_by_inverse_layer_idx
snake_case : Tuple = reorder_and_upcast_attn
snake_case : Tuple = bos_token_id
snake_case : List[str] = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
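# A dependency-free sketch of the config idiom above: every constructor
# argument becomes an attribute, and unknown kwargs are tolerated so older
# checkpoints stay loadable (TinyConfig is illustrative, not the real class).
class TinyConfig:
    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, **kwargs):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        for key, value in kwargs.items():  # forward-compatible extras
            setattr(self, key, value)

_cfg = TinyConfig(act_dim=6, n_head=2)
print(_cfg.act_dim, _cfg.n_head)  # 6 2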
| 36 |
from __future__ import annotations
def lowercase ( __A : int ) -> list[int]:
'''simple docstring'''
    i = 2
    factors = []
    while i * i <= __A:
        if __A % i:
            i += 1
        else:
            __A //= i
            factors.append(i )
    if __A > 1:
        factors.append(__A )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
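# Worked examples for the trial-division factorization above: factors come out
# in non-decreasing order, and a prime factors as itself.
assert lowercase(360) == [2, 2, 2, 3, 3, 5]  # 360 = 2**3 * 3**2 * 5
assert lowercase(97) == [97]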
| 36 | 1 |
import gc
import threading
import time
import psutil
import torch
class _A :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
snake_case : Union[str, Any] = psutil.Process()
snake_case : Union[str, Any] = False
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = -1
while True:
snake_case : List[str] = max(self.process.memory_info().rss ,self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : Optional[Any] = threading.Thread(target=self.peak_monitor )
snake_case : int = True
self.thread.start()
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = False
self.thread.join()
return self.cpu_memory_peak
__lowercase : int = PeakCPUMemory()
def lowercase ( ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = {"""time""": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case : str = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case : Union[str, Any] = torch.cuda.memory_allocated(__A )
torch.cuda.reset_peak_memory_stats()
return measures
def lowercase ( __A : str ) -> List[str]:
'''simple docstring'''
snake_case : Any = {"""time""": time.time() - start_measures["""time"""]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case : Union[str, Any] = (psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**20
snake_case : List[str] = (cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case : List[Any] = (torch.cuda.memory_allocated(__A ) - start_measures[str(__A )]) / 2**20
snake_case : Optional[int] = (torch.cuda.max_memory_allocated(__A ) - start_measures[str(__A )]) / 2**20
return measures
def lowercase ( __A : Dict , __A : Optional[int] ) -> Dict:
'''simple docstring'''
print(f"""{description}:""" )
print(f"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(f"""- GPU {i} allocated: {measures[str(__A )]:.2f}MiB""" )
snake_case : Tuple = measures[f"""{i}-peak"""]
print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
| 36 |
import numpy as np
def lowercase ( __A : np.array ) -> np.array:
'''simple docstring'''
    return (2 / (1 + np.exp(-2 * __A ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
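# A quick numerical check of the identity implemented above:
# 2 / (1 + exp(-2x)) - 1 == tanh(x) for all real x.
_x = np.linspace(-3, 3, 7)
assert np.allclose((2 / (1 + np.exp(-2 * _x))) - 1, np.tanh(_x))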
| 36 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser(
description=(
        '''Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for transfer-learned'''
        ''' distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__lowercase : List[str] = parser.parse_args()
if args.model_type == "bert":
__lowercase : int = BertForMaskedLM.from_pretrained(args.model_name)
__lowercase : List[Any] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
__lowercase : Dict = model.state_dict()
__lowercase : Optional[Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowercase : Union[str, Any] = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__lowercase : List[str] = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
__lowercase : Tuple = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__lowercase : List[str] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__lowercase : Union[str, Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__lowercase : int = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__lowercase : Any = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__lowercase : Dict = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__lowercase : Tuple = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__lowercase : Dict = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__lowercase : Optional[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__lowercase : Any = state_dict['''cls.predictions.decoder.weight''']
__lowercase : List[str] = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowercase : Any = state_dict[f'''cls.predictions.transform.dense.{w}''']
__lowercase : str = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
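# A dict-only sketch of the layer-selection surgery above: chosen teacher
# layers are renumbered consecutively so the student ends up with a dense
# 0..N-1 layer index (key names here are illustrative).
_teacher_layers = [0, 2, 4, 7, 9, 11]
_remap = {f"layer.{t}": f"layer.{s}" for s, t in enumerate(_teacher_layers)}
print(_remap["layer.7"])  # layer.3 -> teacher layer 7 becomes student layer 3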
| 36 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__lowercase : Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def lowercase ( __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
snake_case : Dict = k.replace(__A , __A )
return k
def lowercase ( __A : dict , __A : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
snake_case : Dict = DEFAULTS.copy()
cfg_kwargs.update(__A )
snake_case : int = PegasusConfig(**__A )
snake_case : List[Any] = PegasusForConditionalGeneration(__A )
snake_case : Optional[Any] = torch_model.model.state_dict()
snake_case : Optional[int] = {}
for k, v in tf_weights.items():
snake_case : str = rename_state_dict_key(__A )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
snake_case : Optional[Any] = v.T
snake_case : List[Any] = torch.tensor(__A , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
snake_case : List[str] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Tuple = {k: torch.zeros_like(__A ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__A )
snake_case , snake_case : Union[str, Any] = torch_model.model.load_state_dict(__A , strict=__A )
snake_case : Union[str, Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowercase ( __A : int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = tf.train.list_variables(__A )
snake_case : Union[str, Any] = {}
snake_case : List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__A , desc="""converting tf checkpoint to dict""" ):
snake_case : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : List[str] = tf.train.load_variable(__A , __A )
snake_case : Optional[Any] = array
return tf_weights
def lowercase ( __A : str , __A : str ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = Path(__A ).parent.name
snake_case : Dict = task_specific_params[f"""summarization_{dataset}"""]["""max_position_embeddings"""]
snake_case : Any = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__A )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__A )
# convert model
snake_case : Dict = get_tf_weights_as_numpy(__A )
snake_case : List[Any] = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
snake_case : Optional[int] = task_specific_params
snake_case : Optional[int] = convert_pegasus(__A , __A )
torch_model.save_pretrained(__A )
snake_case : int = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__A , Path(__A ) / """pytorch_model.bin""" )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowercase : List[Any] = parser.parse_args()
if args.save_dir is None:
__lowercase : Optional[Any] = Path(args.tf_ckpt_path).parent.name
__lowercase : Union[str, Any] = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
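# A small sketch of the ordered find-and-replace used by
# rename_state_dict_key above: each (old, new) pair is applied in sequence,
# so earlier rules can feed later ones (patterns here are a subset).
_patterns = [["attention", "attn"], ["/", "."], ["kernel", "weight"]]
_k = "encoder/layer_0/attention/kernel"
for _old, _new in _patterns:
    _k = _k.replace(_old, _new)
print(_k)  # encoder.layer_0.attn.weight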
| 36 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowercase : Any = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[Any] = ['''pixel_values''']
def __init__( self ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = 1 / 255 ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = size if size is not None else {"""shortest_edge""": 224}
snake_case : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : Any = do_resize
snake_case : int = size
snake_case : List[Any] = resample
snake_case : int = do_center_crop
snake_case : str = crop_size
snake_case : List[Any] = do_rescale
snake_case : Union[str, Any] = rescale_factor
snake_case : Any = do_normalize
snake_case : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case : Any = do_convert_rgb
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case : Any = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ ,size=size["""shortest_edge"""] ,default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_ ,size=(size["""height"""], size["""width"""]) ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ ,mean=SCREAMING_SNAKE_CASE_ ,std=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize
snake_case : Union[str, Any] = size if size is not None else self.size
snake_case : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""size""" ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : Any = resample if resample is not None else self.resample
snake_case : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : Tuple = image_mean if image_mean is not None else self.image_mean
snake_case : Tuple = image_std if image_std is not None else self.image_std
snake_case : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case : List[str] = [convert_to_rgb(SCREAMING_SNAKE_CASE_ ) for image in images]
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
snake_case : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
snake_case : List[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
snake_case : List[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
snake_case : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE_ ,mean=SCREAMING_SNAKE_CASE_ ,std=SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ ,tensor_type=SCREAMING_SNAKE_CASE_ )
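# A numpy-only sketch of the transform order enforced by preprocess above
# (resize and center-crop are skipped here; the scalar mean/std are
# placeholders, not the real OPENAI_CLIP statistics):
_img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
_x = _img.astype(np.float32) * (1 / 255)  # rescale
_x = (_x - 0.48) / 0.27                   # normalize
_x = np.transpose(_x, (2, 0, 1))          # channels-first (ChannelDimension.FIRST)
print(_x.shape)  # (3, 224, 224)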
| 36 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _A ( pl.LightningModule ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__()
snake_case : Dict = model
snake_case : Optional[int] = 2
snake_case : Optional[Any] = nn.Linear(self.model.config.hidden_size ,self.num_labels )
def snake_case_ ( self ):
'''simple docstring'''
pass
def lowercase ( __A : str , __A : str , __A : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = LongformerModel.from_pretrained(__A )
snake_case : Tuple = LightningModel(__A )
snake_case : Optional[int] = torch.load(__A , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
snake_case : Dict = LongformerForQuestionAnswering.from_pretrained(__A )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__A )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
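# A minimal sketch of the weight-transfer idiom used above: load_state_dict
# copies parameters between modules whose names and shapes match.
_src, _dst = nn.Linear(4, 2), nn.Linear(4, 2)
_dst.load_state_dict(_src.state_dict())
assert torch.equal(_src.weight, _dst.weight)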
| 36 | 1 |
def lowercase ( __A : list ) -> list:
'''simple docstring'''
    if len(__A ) < 2:
        return __A
    def circle_sort_util(collection : list , low : int , high : int ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left] , collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left] , collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(__A , 0 , len(__A ) - 1 )
    return __A
if __name__ == "__main__":
__lowercase : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
__lowercase : Any = [int(item) for item in user_input.split(''',''')]
    print(lowercase(unsorted))
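# Worked checks for the recursive circle sort above:
assert lowercase([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]
assert lowercase([]) == []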
| 36 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__lowercase : Optional[Any] = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
__lowercase : Optional[int] = None
def lowercase ( ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowercase ( __A : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case : Any = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : int = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def lowercase ( __A : int ) -> Optional[int]:
'''simple docstring'''
def remove_articles(__A : List[Any] ):
return ARTICLES_REGEX.sub(""" """ , __A )
    def white_space_fix(__A : Union[str, Any] ):
        return " ".join(__A.split() )
    def remove_punc(__A : Tuple ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in __A if ch not in exclude )
    def lower(__A : Any ):
        return __A.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def lowercase ( __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
    if not __A :
return []
return normalize_answer(__A ).split()
def lowercase ( __A : Optional[int] , __A : int ) -> List[Any]:
'''simple docstring'''
return int(normalize_answer(__A ) == normalize_answer(__A ) )
def lowercase ( __A : Any , __A : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = get_tokens(__A )
snake_case : str = get_tokens(__A )
snake_case : Dict = collections.Counter(__A ) & collections.Counter(__A )
snake_case : Optional[int] = sum(common.values() )
if len(__A ) == 0 or len(__A ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
snake_case : List[Any] = 1.0 * num_same / len(__A )
snake_case : int = 1.0 * num_same / len(__A )
snake_case : Dict = (2 * precision * recall) / (precision + recall)
return fa
def lowercase ( __A : List[Any] , __A : int ) -> str:
'''simple docstring'''
snake_case : Tuple = {}
snake_case : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : str = qa["""id"""]
snake_case : Union[str, Any] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__A )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
snake_case : Optional[Any] = [""""""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
snake_case : Dict = preds[qid]
# Take max over all gold answers
snake_case : Union[str, Any] = max(compute_exact(__A , __A ) for a in gold_answers )
snake_case : Optional[int] = max(compute_fa(__A , __A ) for a in gold_answers )
return exact_scores, fa_scores
def lowercase ( __A : str , __A : Any , __A : List[Any] , __A : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = {}
for qid, s in scores.items():
snake_case : Any = na_probs[qid] > na_prob_thresh
if pred_na:
snake_case : str = float(not qid_to_has_ans[qid] )
else:
snake_case : List[Any] = s
return new_scores
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str]=None ) -> int:
'''simple docstring'''
if not qid_list:
snake_case : List[str] = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
snake_case : Any = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def lowercase ( __A : Optional[Any] , __A : Tuple , __A : List[str] ) -> Optional[Any]:
'''simple docstring'''
for k in new_eval:
snake_case : str = new_eval[k]
def lowercase ( __A : Tuple , __A : int , __A : Dict , __A : Dict ) -> int:
'''simple docstring'''
plt.step(__A , __A , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__A , __A , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__A )
plt.savefig(__A )
plt.clf()
def lowercase ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Tuple , __A : Optional[Any]=None , __A : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = sorted(__A , key=lambda __A : na_probs[k] )
snake_case : Any = 0.0
snake_case : str = 1.0
snake_case : Tuple = 0.0
snake_case : str = [1.0]
snake_case : Any = [0.0]
snake_case : Dict = 0.0
for i, qid in enumerate(__A ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
snake_case : str = true_pos / float(i + 1 )
snake_case : List[str] = true_pos / float(__A )
if i == len(__A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__A )
recalls.append(__A )
if out_image:
plot_pr_curve(__A , __A , __A , __A )
return {"ap": 100.0 * avg_prec}
def lowercase ( __A : Any , __A : Optional[int] , __A : Tuple , __A : Tuple , __A : List[Any] , __A : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if out_image_dir and not os.path.exists(__A ):
os.makedirs(__A )
snake_case : Tuple = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
snake_case : str = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
snake_case : Dict = {k: float(__A ) for k, v in qid_to_has_ans.items()}
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__A , __A , """pr_exact""" )
merge_eval(__A , __A , """pr_f1""" )
merge_eval(__A , __A , """pr_oracle""" )
def lowercase ( __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not qid_list:
return
snake_case : int = [na_probs[k] for k in qid_list]
snake_case : List[str] = np.ones_like(__A ) / float(len(__A ) )
plt.hist(__A , weights=__A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__A , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def lowercase ( __A : List[Any] , __A : Tuple , __A : Tuple , __A : Any ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
snake_case : str = num_no_ans
snake_case : Optional[Any] = cur_score
snake_case : Optional[Any] = 0.0
snake_case : List[Any] = sorted(__A , key=lambda __A : na_probs[k] )
for i, qid in enumerate(__A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
snake_case : Dict = scores[qid]
else:
if preds[qid]:
snake_case : Dict = -1
else:
snake_case : str = 0
cur_score += diff
if cur_score > best_score:
snake_case : Union[str, Any] = cur_score
snake_case : List[Any] = na_probs[qid]
return 100.0 * best_score / len(__A ), best_thresh
def lowercase ( __A : Dict , __A : str , __A : str , __A : int , __A : str , __A : Any ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : Optional[int] = find_best_thresh(__A , __A , __A , __A )
snake_case , snake_case : str = find_best_thresh(__A , __A , __A , __A )
snake_case : List[str] = best_exact
snake_case : List[Any] = exact_thresh
snake_case : Optional[Any] = best_fa
snake_case : Optional[int] = fa_thresh
def lowercase ( ) -> Any:
'''simple docstring'''
with open(OPTS.data_file ) as f:
snake_case : Dict = json.load(__A )
snake_case : Union[str, Any] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
snake_case : int = json.load(__A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
snake_case : Any = json.load(__A )
else:
snake_case : Any = {k: 0.0 for k in preds}
snake_case : Optional[int] = make_qid_to_has_ans(__A ) # maps qid to True/False
snake_case : Dict = [k for k, v in qid_to_has_ans.items() if v]
snake_case : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
snake_case , snake_case : Optional[Any] = get_raw_scores(__A , __A )
snake_case : Tuple = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[Any] = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[int] = make_eval_dict(__A , __A )
if has_ans_qids:
snake_case : Any = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """HasAns""" )
if no_ans_qids:
snake_case : str = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__A , __A , __A , __A , __A , __A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__A , __A , __A , __A , __A , OPTS.out_image_dir )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__A , __A )
else:
print(json.dumps(__A , indent=2 ) )
if __name__ == "__main__":
__lowercase : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
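# A worked sketch of the token-level F1 computed by compute_fa above:
# bag-of-tokens overlap via Counter intersection.
_gold = "the cat sat".split()
_pred = "a cat sat down".split()
_common = collections.Counter(_gold) & collections.Counter(_pred)
_num_same = sum(_common.values())      # 2 shared tokens
_p, _r = _num_same / len(_pred), _num_same / len(_gold)
print(2 * _p * _r / (_p + _r))         # ~0.571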
| 36 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : List[str] = 16
__lowercase : List[str] = 32
def lowercase ( __A : Accelerator , __A : int = 16 , __A : str = "bert-base-cased" ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(__A )
snake_case : Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__A : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
snake_case : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__A , max_length=__A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case : Optional[int] = datasets.map(
__A , batched=__A , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case : Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__A , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(__A , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
snake_case : Dict = DataLoader(
tokenized_datasets["""train"""] , shuffle=__A , collate_fn=__A , batch_size=__A )
snake_case : Optional[int] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__A , collate_fn=__A , batch_size=__A )
return train_dataloader, eval_dataloader
def lowercase ( __A : Union[str, Any] , __A : Optional[Any] ) -> str:
'''simple docstring'''
snake_case : List[str] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case : str = config["""lr"""]
snake_case : int = int(config["""num_epochs"""] )
snake_case : List[Any] = int(config["""seed"""] )
snake_case : Any = int(config["""batch_size"""] )
snake_case : Any = args.model_name_or_path
set_seed(__A )
snake_case , snake_case : str = get_dataloaders(__A , __A , __A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(__A , return_dict=__A )
# Instantiate optimizer
snake_case : Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case : str = optimizer_cls(params=model.parameters() , lr=__A )
if accelerator.state.deepspeed_plugin is not None:
snake_case : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
snake_case : Optional[int] = 1
snake_case : int = (len(__A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=__A , num_warmup_steps=0 , num_training_steps=__A , )
else:
snake_case : Union[str, Any] = DummyScheduler(__A , total_num_steps=__A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case , snake_case , snake_case , snake_case , snake_case : Optional[Any] = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
snake_case : Dict = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case : int = 0
# Now we train the model
snake_case : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
snake_case : List[str] = 0
snake_case : Dict = {}
for epoch in range(__A , __A ):
model.train()
for step, batch in enumerate(__A ):
snake_case : int = model(**__A )
snake_case : Optional[Any] = outputs.loss
snake_case : List[str] = loss / gradient_accumulation_steps
accelerator.backward(__A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
snake_case : Tuple = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case : int = model(**__A )
snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case , snake_case : Union[str, Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__A ) - 1:
snake_case : List[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case : Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__A , references=__A , )
snake_case : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __A )
snake_case : Any = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
snake_case : Union[str, Any] = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(__A , __A )
def lowercase ( ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=__A , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__A , )
parser.add_argument(
"""--output_dir""" , type=__A , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=__A , default=__A , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=__A , default=3 , help="""Number of train epochs.""" , )
snake_case : List[str] = parser.parse_args()
snake_case : str = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(__A , __A )
if __name__ == "__main__":
main()
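# A torch-only sketch of the manual gradient accumulation in the training
# loop above: scale the loss by the accumulation factor and step the
# optimizer once every `k` micro-batches (mirroring the `step % k == 0`
# guard used above).
_model = torch.nn.Linear(4, 1)
_opt = torch.optim.AdamW(_model.parameters(), lr=1e-3)
_k = 4  # stands in for gradient_accumulation_steps
for _step in range(8):
    _loss = _model(torch.randn(2, 4)).pow(2).mean() / _k
    _loss.backward()
    if _step % _k == 0:
        _opt.step()
        _opt.zero_grad()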
| 36 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowercase : Dict = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ['''pixel_values''']
def __init__( self ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = 1 / 255 ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = size if size is not None else {"""shortest_edge""": 224}
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : Optional[Any] = do_resize
snake_case : Union[str, Any] = size
snake_case : Dict = resample
snake_case : Dict = do_rescale
snake_case : Dict = rescale_factor
snake_case : List[str] = do_center_crop
snake_case : Dict = crop_size
snake_case : Any = do_flip_channel_order
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ ,size=size["""shortest_edge"""] ,default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_ ,size=(size["""height"""], size["""width"""]) ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
return flip_channel_order(SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case : List[str] = resample if resample is not None else self.resample
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
snake_case : Tuple = size if size is not None else self.size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else self.crop_size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
snake_case : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
snake_case : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
snake_case : Optional[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
snake_case : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
snake_case : Optional[int] = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : List[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : int = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ ,tensor_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits ) != len(target_sizes ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(target_sizes ):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits ) ):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=False )
semantic_map = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(semantic_map )
else:
semantic_segmentation = logits.argmax(dim=1 )
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
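# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal end-to-end pass through a processor like the one above, assuming a
# MobileViT-style segmentation checkpoint; the checkpoint name and image path
# below are assumptions, not taken from this file.
# from PIL import Image
# from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation
# processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
# model = AutoModelForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
# inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
# outputs = model(**inputs)
# # post-processing returns one (height, width) map of class ids per image
# maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])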
| 36 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _A ( snake_case , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = BertTokenizer
__lowerCamelCase : Tuple = BertTokenizerFast
__lowerCamelCase : List[str] = True
__lowerCamelCase : Dict = True
__lowerCamelCase : Any = filter_non_english
def snake_case_ ( self ):
'''simple docstring'''
super().setUp()
vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : str = """UNwant\u00E9d,running"""
snake_case : Any = """unwanted, running"""
return input_text, output_text
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = self.tokenizer_class(self.vocab_file )
snake_case : List[Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) ,[9, 6, 7, 12, 10, 11] )
def snake_case_ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : Tuple = self.get_rust_tokenizer()
snake_case : Tuple = """UNwant\u00E9d,running"""
snake_case : Tuple = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = self.get_rust_tokenizer()
snake_case : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# With lower casing
snake_case : Union[str, Any] = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = """UNwant\u00E9d,running"""
snake_case : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
snake_case : Any = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : int = self.get_rust_tokenizer()
snake_case : str = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ,strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ,strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ,strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ,strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = BasicTokenizer()
snake_case : Any = """a\n'll !!to?'d of, can't."""
snake_case : Dict = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
snake_case : Optional[int] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_ ):
snake_case : Tuple = i
snake_case : Optional[Any] = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = self.get_tokenizer()
snake_case : Optional[int] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
snake_case : Any = tokenizer.encode("""sequence builders""" ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : Dict = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : int = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
snake_case : List[Any] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ ,return_attention_mask=SCREAMING_SNAKE_CASE_ ,return_token_type_ids=SCREAMING_SNAKE_CASE_ ,return_offsets_mapping=SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ,)
snake_case : int = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ ,"""do_lower_case""" ) else False
snake_case : List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = ["""的""", """人""", """有"""]
snake_case : Union[str, Any] = """""".join(SCREAMING_SNAKE_CASE_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : List[Any] = True
snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : int = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = False
snake_case : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Any = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Dict = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case : int = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
| 36 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save an untrained, randomly initialized model built from config_name's config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
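# Usage sketch (added for illustration; the paths and override are assumptions):
# fire maps CLI arguments onto the function signature, so e.g.
#   python this_script.py t5-small ./t5-random --num_layers=2
# fetches only the t5-small config and tokenizer, overrides num_layers, and
# writes an untrained model plus tokenizer to ./t5-random.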
| 36 | 1 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" ,FutureWarning ,)
super().__init__(args=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 36 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = '''mobilenet_v1'''
def __init__( self ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=224 ,SCREAMING_SNAKE_CASE_=1.0 ,SCREAMING_SNAKE_CASE_=8 ,SCREAMING_SNAKE_CASE_="relu6" ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=0.9_99 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=0.0_01 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
snake_case : List[Any] = num_channels
snake_case : str = image_size
snake_case : List[Any] = depth_multiplier
snake_case : Optional[int] = min_depth
snake_case : Union[str, Any] = hidden_act
snake_case : int = tf_padding
snake_case : Optional[int] = classifier_dropout_prob
snake_case : Tuple = initializer_range
snake_case : List[str] = layer_norm_eps
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
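# Usage sketch (added for illustration; assumes the usual public names
# MobileNetV1Config / MobileNetV1OnnxConfig for the two classes above):
# config = MobileNetV1Config(depth_multiplier=0.25)  # a thinner variant of the network
# onnx_config = MobileNetV1OnnxConfig(config)
# print(onnx_config.inputs)               # OrderedDict([("pixel_values", {0: "batch"})])
# print(onnx_config.atol_for_validation)  # 1e-4, per the property above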
| 36 | 1 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is already a root of the function
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # no sign change on [a, b], so this algorithm can't locate a root here
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # stop once the interval width drops below 1e-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1_000))
    import doctest
    doctest.testmod()
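# Worked check (added for illustration): f(x) = x**3 - 2x - 5 changes sign on
# [2, 3] since f(2) = -1 and f(3) = 16, so its real root sits near 2.0945515;
# bisection(f, 1, 1_000) converges to it because f(1) = -6 < 0 and f(1000) > 0.
# A quick second example with a known root:
# >>> round(bisection(lambda x: x**2 - 2, 1, 2), 6)
# 1.414214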
| 36 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''decision_transformer'''
__lowerCamelCase : Optional[Any] = ['''past_key_values''']
__lowerCamelCase : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,SCREAMING_SNAKE_CASE_=17 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=1024 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_="relu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=1E-5 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Any = state_dim
snake_case : Optional[Any] = act_dim
snake_case : Union[str, Any] = hidden_size
snake_case : Any = max_ep_len
snake_case : int = action_tanh
snake_case : Any = vocab_size
snake_case : Any = n_positions
snake_case : List[str] = n_layer
snake_case : int = n_head
snake_case : Optional[int] = n_inner
snake_case : List[Any] = activation_function
snake_case : Tuple = resid_pdrop
snake_case : Optional[Any] = embd_pdrop
snake_case : Dict = attn_pdrop
snake_case : List[str] = layer_norm_epsilon
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[Any] = scale_attn_weights
snake_case : str = use_cache
snake_case : int = scale_attn_by_inverse_layer_idx
snake_case : Tuple = reorder_and_upcast_attn
snake_case : Tuple = bos_token_id
snake_case : List[str] = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
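# Instantiation sketch (added for illustration): the defaults already describe a
# small GPT-2-style trunk; state_dim=11 and act_dim=3 below are the Hopper
# observation/action sizes, stated here as an assumption for the example.
# config = DecisionTransformerConfig(state_dim=11, act_dim=3)  # assumes the public class name
# print(config.hidden_size, config.n_layer, config.n_head)     # 128 3 1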
| 36 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowercase : List[str] = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Tuple = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
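# How the lazy module behaves (added for illustration): once sys.modules is
# patched, attribute access drives the imports, so
# import transformers.models.longt5 as longt5   # cheap, nothing heavy imported yet
# model_cls = longt5.LongT5Model                # first touch imports modeling_longt5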
| 36 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
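# Scheduler-swap sketch (added for illustration; the checkpoint name is an
# assumption): all schedulers above share the SchedulerMixin/ConfigMixin
# interface, so one can be rebuilt from another's config at runtime:
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]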
| 36 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=13 ,SCREAMING_SNAKE_CASE_=7 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=19 ,SCREAMING_SNAKE_CASE_=32 ,SCREAMING_SNAKE_CASE_=5 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=37 ,SCREAMING_SNAKE_CASE_="gelu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=16 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=None ,):
'''simple docstring'''
snake_case : Union[str, Any] = parent
snake_case : List[Any] = batch_size
snake_case : Union[str, Any] = seq_length
snake_case : Dict = is_training
snake_case : List[Any] = use_input_mask
snake_case : Optional[Any] = use_token_type_ids
snake_case : Optional[int] = use_labels
snake_case : Optional[int] = vocab_size
snake_case : List[str] = hidden_size
snake_case : Any = num_hidden_layers
snake_case : Union[str, Any] = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : Optional[Any] = hidden_act
snake_case : Dict = hidden_dropout_prob
snake_case : Dict = attention_probs_dropout_prob
snake_case : List[str] = max_position_embeddings
snake_case : List[str] = type_vocab_size
snake_case : Any = type_sequence_label_size
snake_case : List[str] = initializer_range
snake_case : Tuple = num_labels
snake_case : Tuple = num_choices
snake_case : Any = scope
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case : Optional[Any] = None
if self.use_input_mask:
snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : str = None
snake_case : Tuple = None
snake_case : List[Any] = None
if self.use_labels:
snake_case : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
snake_case : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = EsmConfig(
vocab_size=33 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=SCREAMING_SNAKE_CASE_ ,esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} ,)
return config
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Any = EsmForProteinFolding(config=SCREAMING_SNAKE_CASE_ ).float()
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : str = model(SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ )
snake_case : int = model(SCREAMING_SNAKE_CASE_ )
snake_case : str = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) )
def snake_case_ ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _A ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[str] = False
__lowerCamelCase : Optional[Any] = (EsmForProteinFolding,) if is_torch_available() else ()
__lowerCamelCase : Optional[int] = ()
__lowerCamelCase : int = {} if is_torch_available() else {}
__lowerCamelCase : Union[str, Any] = False
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = EsmFoldModelTester(self )
snake_case : List[Any] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE_ ,hidden_size=37 )
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip("""Does not support attention outputs""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold only has one output format.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@require_torch
class _A ( snake_case ):
'''simple docstring'''
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Any = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
model.eval()
snake_case : Optional[int] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
snake_case : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )["""positions"""]
snake_case : Any = torch.tensor([2.58_28, 0.79_93, -10.93_34] ,dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,SCREAMING_SNAKE_CASE_ ,atol=1E-4 ) )
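# Inference sketch (added for illustration), mirroring the integration test just
# above; the short protein sequence is an arbitrary assumption.
# from transformers import AutoTokenizer, EsmForProteinFolding
# tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
# model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float().eval()
# inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False)
# positions = model(**inputs)["positions"]  # shape (8, batch, seq_len, 14, 3) atom coordinates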
| 36 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang ) -> None:
'''simple docstring'''
texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case : Optional[int] = f"""{src_lang}-{tgt_lang}"""
snake_case : Any = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(model_card_dir , exist_ok=True )
path = os.path.join(model_card_dir , """README.md""" )
print(f"""Generating {path}""" )
with open(path , """w""" , encoding="""utf-8""" ) as f:
f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_, src_lang, tgt_lang = model_name.split('''-''')
model_card_dir = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
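# Usage note (added for illustration): running this script from a repository
# checkout regenerates all four FSMT cards; a single card can also be written
# directly, e.g.
# write_model_card(model_cards_dir / "facebook" / "wmt19-en-de", src_lang="en", tgt_lang="de")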
| 36 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Tuple = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
'''simple docstring'''
snake_case : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case : str = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
prefix = """"""
else:
prefix = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
: config.hidden_size, :
]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
'''simple docstring'''
ignore_keys = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(k , None )
def rename_key(dct , old , new ):
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def prepare_img():
'''simple docstring'''
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path ):
'''simple docstring'''
config = ViTConfig()
base_model = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
base_model = True
config.patch_size = int(vit_name[-12:-10] )
config.image_size = int(vit_name[-9:-6] )
else:
config.num_labels = 1000
repo_id = """huggingface/label-files"""
filename = """imagenet-1k-id2label.json"""
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.patch_size = int(vit_name[-6:-4] )
config.image_size = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
snake_case : Union[str, Any] = 192
snake_case : Optional[int] = 768
snake_case : Optional[int] = 12
snake_case : Tuple = 3
elif vit_name[9:].startswith("""small""" ):
snake_case : Tuple = 384
snake_case : Any = 1536
snake_case : Any = 12
snake_case : Any = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
snake_case : Optional[int] = 768
snake_case : str = 2304
snake_case : Optional[int] = 8
snake_case : int = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
snake_case : Optional[Any] = 1024
snake_case : List[str] = 4096
snake_case : List[str] = 24
snake_case : Optional[Any] = 16
elif vit_name[4:].startswith("""huge""" ):
snake_case : List[str] = 1280
snake_case : Dict = 5120
snake_case : Dict = 32
snake_case : Optional[Any] = 16
# load original model from timm
timm_model = timm.create_model(vit_name , pretrained=True )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = timm_model.state_dict()
if base_model:
remove_classification_head_(state_dict )
rename_keys = create_rename_keys(config , base_model )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config , base_model )
# load HuggingFace model
if vit_name[-5:] == "in21k":
model = ViTModel(config ).eval()
else:
model = ViTForImageClassification(config ).eval()
model.load_state_dict(state_dict )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case : Dict = DeiTImageProcessor(size=config.image_size )
else:
snake_case : List[Any] = ViTImageProcessor(size=config.image_size )
snake_case : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case : List[Any] = encoding["""pixel_values"""]
snake_case : str = model(__A )
if base_model:
snake_case : List[Any] = timm_model.forward_features(__A )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__A , outputs.pooler_output , atol=1E-3 )
else:
snake_case : Union[str, Any] = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowercase : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
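# CLI sketch (added for illustration; the script and folder names are assumptions):
#   python this_script.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224
# This pulls the timm weights, remaps them onto the HF ViT layout, checks that
# the outputs agree to atol=1e-3, and saves the model plus image processor.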
| 36 |
__lowercase : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowercase : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowercase : List[str] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 1 |
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = (10**k - 1) // 9 is divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1_000_000) -> int:
    """Return the least divisor d, coprime to 10, with A(d) > limit (Project Euler 129)."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
    print(f"{solution() = }")
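# Worked check (added for illustration): repunits are built mod d via
# R(k+1) = 10 * R(k) + 1, and least_divisible_repunit(d) is the first k with
# R(k) % d == 0. For d = 7: 111111 = 7 * 15873, so least_divisible_repunit(7) == 6.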
| 36 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" ,FutureWarning ,)
super().__init__(args=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 36 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Any = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[Any] = '''codegen'''
__lowerCamelCase : str = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,SCREAMING_SNAKE_CASE_=50400 ,SCREAMING_SNAKE_CASE_=2048 ,SCREAMING_SNAKE_CASE_=2048 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=28 ,SCREAMING_SNAKE_CASE_=16 ,SCREAMING_SNAKE_CASE_=64 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_="gelu_new" ,SCREAMING_SNAKE_CASE_=0.0 ,SCREAMING_SNAKE_CASE_=0.0 ,SCREAMING_SNAKE_CASE_=0.0 ,SCREAMING_SNAKE_CASE_=1E-5 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Union[str, Any] = vocab_size
snake_case : List[str] = n_ctx
snake_case : str = n_positions
snake_case : Tuple = n_embd
snake_case : List[Any] = n_layer
snake_case : Tuple = n_head
snake_case : Any = n_inner
snake_case : Union[str, Any] = rotary_dim
snake_case : Union[str, Any] = activation_function
snake_case : List[Any] = resid_pdrop
snake_case : List[str] = embd_pdrop
snake_case : str = attn_pdrop
snake_case : Tuple = layer_norm_epsilon
snake_case : int = initializer_range
snake_case : int = use_cache
snake_case : int = bos_token_id
snake_case : str = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,tie_word_embeddings=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "default" ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ ,task=SCREAMING_SNAKE_CASE_ ,patching_specs=SCREAMING_SNAKE_CASE_ ,use_past=SCREAMING_SNAKE_CASE_ )
if not getattr(self._config ,"""pad_token_id""" ,SCREAMING_SNAKE_CASE_ ):
# TODO: how to do that better?
snake_case : Optional[Any] = 0
@property
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ ,direction="""inputs""" )
snake_case : Dict = {0: """batch""", 1: """past_sequence + sequence"""}
else:
snake_case : Optional[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def snake_case_ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def snake_case_ ( self ):
'''simple docstring'''
return self._config.n_head
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
snake_case : List[str] = super(SCREAMING_SNAKE_CASE_ ,self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,seq_length=SCREAMING_SNAKE_CASE_ ,is_pair=SCREAMING_SNAKE_CASE_ ,framework=SCREAMING_SNAKE_CASE_ )
        # We need to order the inputs in the way they appear in forward()
snake_case : Any = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case , snake_case : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case : str = seqlen + 2
snake_case : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case : Optional[int] = [
(torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(self.num_layers )
]
snake_case : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
snake_case : Dict = ordered_inputs["""attention_mask"""].dtype
snake_case : Any = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,dtype=SCREAMING_SNAKE_CASE_ )] ,dim=1 )
return ordered_inputs
@property
def snake_case_ ( self ):
'''simple docstring'''
return 13
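# Illustrative standalone sketch (not part of the original file; all sizes are hypothetical):
# how the dummy `past_key_values` tensors and the extended attention mask for ONNX export
# are shaped, mirroring the `generate_dummy_inputs` logic above.
import torch
batch, seqlen, n_head, n_embd, n_layer = 2, 8, 16, 1024, 4
past_key_values_length = seqlen + 2
past_shape = (batch, n_head, past_key_values_length, n_embd // n_head)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]
attention_mask = torch.cat(
    [torch.ones(batch, seqlen, dtype=torch.int64), torch.ones(batch, past_key_values_length, dtype=torch.int64)], dim=1
)
assert past_key_values[0][0].shape == (2, 16, 10, 64)
assert attention_mask.shape == (2, 18)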
| 36 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowercase : List[str] = ['''text''', '''image''', '''audio''']
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__A , __A ):
inputs.append(create_inputs(__A ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
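# Standalone sketch (hypothetical helper name; the mangled function above is not directly
# callable because its parameter was renamed): declared input types map to dummy payloads,
# and nested type lists are handled recursively.
def _make_dummy_inputs(input_types: list) -> list:
    dummies = []
    for input_type in input_types:
        if input_type == "text":
            dummies.append("Text input")
        elif isinstance(input_type, list):
            dummies.append(_make_dummy_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return dummies
assert _make_dummy_inputs(["text", ["text"]]) == ["Text input", ["Text input"]]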
def lowercase ( __A : List ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = []
for output in outputs:
if isinstance(__A , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__A , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__A , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _A :
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
snake_case : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input ,SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : str = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : Union[str, Any] = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) ,self.tool.outputs )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.outputs ):
snake_case : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
| 36 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase : str = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 36 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowercase : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase ( __A : Optional[Any] , __A : Optional[Any] ) -> str:
'''simple docstring'''
inspect_dataset(__A , __A )
snake_case : List[str] = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase ( __A : Optional[int] , __A : Any ) -> Optional[Any]:
'''simple docstring'''
inspect_metric(__A , __A )
snake_case : Any = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Tuple , __A : Dict , __A : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = get_dataset_config_info(__A , config_name=__A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Tuple , __A : Any , __A : List[str] ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_config_info(__A , config_name=__A )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase ( __A : Any , __A : Dict ) -> Dict:
'''simple docstring'''
snake_case : int = get_dataset_config_names(__A )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[Any] , __A : Dict , __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[Any] = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
snake_case : Any = expected_configs[0]
assert expected_config in infos
snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[int] , __A : Tuple , __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = get_dataset_infos(__A )
assert expected_config in infos
snake_case : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Optional[int] , __A : Any , __A : Dict ) -> int:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
| 36 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__lowercase : Any = '''\
Text data.
Second line of data.'''
__lowercase : Tuple = '''file'''
@pytest.fixture(scope="""session""" )
def lowercase ( __A : List[str] ) -> int:
'''simple docstring'''
snake_case : Dict = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
snake_case : Optional[int] = bytes(__A , """utf-8""" )
with zstd.open(__A , """wb""" ) as f:
f.write(__A )
return path
@pytest.fixture
def lowercase ( __A : Dict ) -> int:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , __A ) , """w""" ) as f:
f.write(__A )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def lowercase ( __A : Tuple , __A : Dict , __A : Optional[int] , __A : Dict , __A : List[str] , __A : int ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
snake_case : List[str] = input_paths[compression_format]
snake_case : str = tmp_path / """cache"""
snake_case : Dict = DownloadConfig(cache_dir=__A , extract_compressed_file=__A )
snake_case : Optional[int] = cached_path(__A , download_config=__A )
with open(__A ) as f:
snake_case : Optional[Any] = f.read()
with open(__A ) as f:
snake_case : List[Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def lowercase ( __A : List[str] , __A : Tuple , __A : int , __A : Dict , __A : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = """custom_cache"""
snake_case : List[str] = """custom_extracted_dir"""
snake_case : List[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
snake_case : int = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , __A )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(__A ) )
snake_case : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case : Optional[Any] = xz_file
snake_case : Union[str, Any] = (
DownloadConfig(extract_compressed_file=__A )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__A )
)
snake_case : str = cached_path(__A , download_config=__A )
assert Path(__A ).parent.parts[-2:] == expected
def lowercase ( __A : str ) -> str:
'''simple docstring'''
snake_case : List[str] = str(Path(__A ).resolve() )
assert cached_path(__A ) == text_file
# relative path
snake_case : Dict = str(Path(__A ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__A ) == text_file
def lowercase ( __A : Dict ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(__A ):
cached_path(__A )
# relative path
snake_case : Tuple = """./__missing_file__.txt"""
with pytest.raises(__A ):
cached_path(__A )
def lowercase ( __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(__A ) as f:
snake_case : List[str] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , __A )
def lowercase ( ) -> List[str]:
'''simple docstring'''
with pytest.raises(__A ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , __A )
def lowercase ( __A : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(__A ):
http_get("""https://huggingface.co""" , temp_file=__A )
with pytest.raises(__A ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , __A )
def lowercase ( __A : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(__A ):
ftp_get("""ftp://huggingface.co""" , temp_file=__A )
with pytest.raises(__A ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , __A )
def lowercase ( __A : Optional[int] ) -> str:
'''simple docstring'''
snake_case : int = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(__A ):
fsspec_get("""s3://huggingface.co""" , temp_file=__A )
with pytest.raises(__A ):
fsspec_head("""s3://huggingface.co""" )
| 36 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase : Optional[Any] = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''albert'''
def __init__( self ,SCREAMING_SNAKE_CASE_=30000 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=12 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=64 ,SCREAMING_SNAKE_CASE_=16384 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_="gelu_new" ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=1E-12 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_="absolute" ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ ,bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = vocab_size
snake_case : int = embedding_size
snake_case : int = hidden_size
snake_case : List[Any] = num_hidden_layers
snake_case : int = num_hidden_groups
snake_case : List[str] = num_attention_heads
snake_case : List[str] = inner_group_num
snake_case : Any = hidden_act
snake_case : Any = intermediate_size
snake_case : Union[str, Any] = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Tuple = max_position_embeddings
snake_case : Any = type_vocab_size
snake_case : Optional[Any] = initializer_range
snake_case : int = layer_norm_eps
snake_case : Optional[int] = classifier_dropout_prob
snake_case : str = position_embedding_type
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
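# Standalone sketch (hypothetical usage, reusing the OrderedDict import above): the
# dynamic-axes mapping the ONNX config above exports for the multiple-choice task.
_choice_axis = {0: "batch", 1: "choice", 2: "sequence"}
_onnx_inputs = OrderedDict(
    [("input_ids", _choice_axis), ("attention_mask", _choice_axis), ("token_type_ids", _choice_axis)]
)
assert list(_onnx_inputs) == ["input_ids", "attention_mask", "token_type_ids"]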
| 36 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase : Dict = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = ['''DeiTFeatureExtractor''']
__lowercase : Tuple = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__lowercase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 36 |
from __future__ import annotations
def lowercase ( __A : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(__A ) / len(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowercase ( ) -> List[str]:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__A ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def lowercase ( ) -> Dict:
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__A ):
http_head("""https://huggingface.co""" )
| 36 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Tuple = '''vision-encoder-decoder'''
__lowerCamelCase : List[Any] = True
def __init__( self ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
snake_case : Union[str, Any] = kwargs.pop("""encoder""" )
snake_case : Any = encoder_config.pop("""model_type""" )
snake_case : Optional[Any] = kwargs.pop("""decoder""" )
snake_case : Union[str, Any] = decoder_config.pop("""model_type""" )
snake_case : Any = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : int = True
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
snake_case : Tuple = True
snake_case : Union[str, Any] = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ )
snake_case : Union[str, Any] = self.encoder.to_dict()
snake_case : Union[str, Any] = self.decoder.to_dict()
snake_case : Dict = self.__class__.model_type
return output
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = OrderedDict()
snake_case : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Optional[Any] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
import torch
snake_case : Optional[Any] = OrderedDict()
snake_case : Tuple = super().generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,seq_length=SCREAMING_SNAKE_CASE_ ,is_pair=SCREAMING_SNAKE_CASE_ ,framework=SCREAMING_SNAKE_CASE_ )
snake_case , snake_case : List[Any] = dummy_input["""input_ids"""].shape
snake_case : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
snake_case : List[str] = dummy_input.pop("""input_ids""" )
snake_case : int = dummy_input.pop("""attention_mask""" )
snake_case : Dict = torch.zeros(SCREAMING_SNAKE_CASE_ )
return common_inputs
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "default" ):
'''simple docstring'''
snake_case : int = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
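# Standalone sketch (hypothetical sizes): the decoder-side dummy input built above swaps
# the encoder's token ids for zero hidden states of shape
# (batch, encoder_sequence, encoder_hidden_size).
import torch
_batch, _encoder_sequence, _encoder_hidden_size = 2, 16, 768
_encoder_hidden_states = torch.zeros(_batch, _encoder_sequence, _encoder_hidden_size)
assert _encoder_hidden_states.shape == (2, 16, 768)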
| 36 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowercase : Optional[int] = logging.get_logger(__name__)
__lowercase : List[Any] = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def lowercase ( __A : Optional[Any] , __A : List[str] , __A : Union[str, Any] , __A : Optional[Any] , __A : str ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split(""".""" ):
snake_case : Optional[int] = getattr(__A , __A )
if weight_type is not None:
snake_case : Tuple = getattr(__A , __A ).shape
else:
snake_case : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case : Optional[int] = value
elif weight_type == "weight_g":
snake_case : int = value
elif weight_type == "weight_v":
snake_case : Union[str, Any] = value
elif weight_type == "bias":
snake_case : int = value
else:
snake_case : str = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowercase ( __A : Tuple , __A : Optional[int] , __A : int ) -> int:
'''simple docstring'''
snake_case : Any = []
snake_case : Optional[int] = fairseq_model.state_dict()
snake_case : Tuple = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == """group""" , )
snake_case : Dict = True
else:
for key, mapped_key in MAPPING.items():
snake_case : Dict = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case : str = True
if "*" in mapped_key:
snake_case : Optional[int] = name.split(__A )[0].split(""".""" )[-2]
snake_case : Optional[Any] = mapped_key.replace("""*""" , __A )
if "weight_g" in name:
snake_case : Dict = """weight_g"""
elif "weight_v" in name:
snake_case : List[Any] = """weight_v"""
elif "weight" in name:
snake_case : Tuple = """weight"""
elif "bias" in name:
snake_case : int = """bias"""
else:
snake_case : List[Any] = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase ( __A : List[Any] , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : str = full_name.split("""conv_layers.""" )[-1]
snake_case : List[Any] = name.split(""".""" )
snake_case : Tuple = int(items[0] )
snake_case : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case : str = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
def lowercase ( __A : Optional[Any] , __A : List[str] ) -> str:
'''simple docstring'''
snake_case : Tuple = SEWConfig()
if is_finetuned:
snake_case : Dict = model.wav_encoder.wav_model.cfg
else:
snake_case : str = model.cfg
snake_case : Union[str, Any] = fs_config.conv_bias
snake_case : Any = eval(fs_config.conv_feature_layers )
snake_case : Any = [x[0] for x in conv_layers]
snake_case : Dict = [x[1] for x in conv_layers]
snake_case : Optional[int] = [x[2] for x in conv_layers]
snake_case : str = """gelu"""
snake_case : Tuple = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
snake_case : List[Any] = 0.0
snake_case : int = fs_config.activation_fn.name
snake_case : Any = fs_config.encoder_embed_dim
snake_case : Optional[int] = 0.02
snake_case : Dict = fs_config.encoder_ffn_embed_dim
snake_case : int = 1E-5
snake_case : List[Any] = fs_config.encoder_layerdrop
snake_case : List[Any] = fs_config.encoder_attention_heads
snake_case : Dict = fs_config.conv_pos_groups
snake_case : Dict = fs_config.conv_pos
snake_case : Union[str, Any] = len(__A )
snake_case : Dict = fs_config.encoder_layers
snake_case : Tuple = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
snake_case : List[Any] = model.cfg
snake_case : Optional[int] = fs_config.final_dropout
snake_case : List[str] = fs_config.layerdrop
snake_case : int = fs_config.activation_dropout
snake_case : List[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
snake_case : Union[str, Any] = fs_config.attention_dropout
snake_case : List[Any] = fs_config.dropout_input
snake_case : List[Any] = fs_config.dropout
snake_case : int = fs_config.mask_channel_length
snake_case : Any = fs_config.mask_channel_prob
snake_case : Dict = fs_config.mask_length
snake_case : Dict = fs_config.mask_prob
snake_case : Optional[Any] = """Wav2Vec2FeatureExtractor"""
snake_case : Tuple = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def lowercase ( __A : Any , __A : str , __A : Any=None , __A : int=None , __A : Union[str, Any]=True ) -> List[str]:
'''simple docstring'''
if is_finetuned:
snake_case , snake_case , snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
snake_case : int = SEWConfig.from_pretrained(__A )
else:
snake_case : List[str] = convert_config(model[0] , __A )
snake_case : Optional[Any] = model[0].eval()
snake_case : Union[str, Any] = True if config.feat_extract_norm == """layer""" else False
snake_case : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
if is_finetuned:
if dict_path:
snake_case : str = Dictionary.load(__A )
            # important: change bos & pad token id since the CTC symbol is <pad> and
            # not <s> as in fairseq
snake_case : Optional[Any] = target_dict.pad_index
snake_case : List[Any] = target_dict.bos_index
snake_case : Union[str, Any] = target_dict.pad_index
snake_case : Tuple = target_dict.bos_index
snake_case : Union[str, Any] = target_dict.eos_index
snake_case : str = len(target_dict.symbols )
snake_case : Tuple = os.path.join(__A , """vocab.json""" )
if not os.path.isdir(__A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) )
return
os.makedirs(__A , exist_ok=__A )
with open(__A , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __A )
snake_case : int = WavaVecaCTCTokenizer(
__A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__A , )
snake_case : Tuple = WavaVecaProcessor(feature_extractor=__A , tokenizer=__A )
processor.save_pretrained(__A )
snake_case : Any = SEWForCTC(__A )
else:
snake_case : Tuple = SEWModel(__A )
feature_extractor.save_pretrained(__A )
recursively_load_weights(__A , __A , __A )
hf_model.save_pretrained(__A )
if __name__ == "__main__":
__lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__lowercase : Dict = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 36 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def lowercase ( __A : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = """huggingface/label-files"""
snake_case : int = """imagenet-1k-id2label.json"""
snake_case : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
snake_case : Any = {int(__A ): v for k, v in idalabel.items()}
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : Any = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case : List[Any] = BitConfig(
conv_layer=__A , num_labels=1000 , idalabel=__A , labelaid=__A , )
return config
def lowercase ( __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "stem.conv" in name:
snake_case : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
snake_case : List[str] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
snake_case : Optional[int] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
snake_case : Optional[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
snake_case : Tuple = """bit.encoder.""" + name
return name
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowercase ( __A : Any , __A : Union[str, Any] , __A : str=False ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_config(__A )
# load original model from timm
snake_case : Tuple = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
snake_case : List[str] = timm_model.state_dict()
for key in state_dict.copy().keys():
snake_case : List[Any] = state_dict.pop(__A )
snake_case : Union[str, Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
snake_case : List[Any] = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
snake_case : Dict = create_transform(**resolve_data_config({} , model=__A ) )
snake_case : Optional[Any] = transform.transforms
snake_case : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case : Union[str, Any] = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case : Dict = prepare_img()
snake_case : List[str] = transform(__A ).unsqueeze(0 )
snake_case : int = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
snake_case : Optional[int] = model(__A )
snake_case : Dict = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case : int = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36 | 1 |
def lowercase ( __A : str , __A : list[str] ) -> str:
'''simple docstring'''
snake_case : Optional[int] = """"""
for word_or_phrase in separated:
if not isinstance(__A , __A ):
raise Exception("""join() accepts only strings to be joined""" )
joined += word_or_phrase + separator
return joined.strip(__A )
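# Standalone sketch of the logic above (a local copy, since the mangled parameter names do
# not resolve). Note that str.strip(separator) removes *characters* from both ends, which is
# how the single trailing separator is dropped here.
def _join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
assert _join("-", ["a", "b", "c"]) == "a-b-c"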
if __name__ == "__main__":
from doctest import testmod
testmod()
| 36 |
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1''' # defaults region
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : Dict = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
__lowerCamelCase : Optional[Any] = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
__lowerCamelCase : List[str] = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 36 | 1 |
import math
def lowercase ( __A : float , __A : float ) -> float:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__A ) ) ** 2)
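# Standalone numeric check of Malus's law I = I0 * cos^2(theta), matching the function
# above (a local copy, reusing the math import, since the surrounding names are mangled).
def _malus_law(initial_intensity: float, angle: float) -> float:
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
assert _malus_law(100.0, 0) == 100.0
assert abs(_malus_law(100.0, 60) - 25.0) < 1e-9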
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 36 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''markuplm'''
def __init__( self ,SCREAMING_SNAKE_CASE_=30522 ,SCREAMING_SNAKE_CASE_=768 ,SCREAMING_SNAKE_CASE_=12 ,SCREAMING_SNAKE_CASE_=12 ,SCREAMING_SNAKE_CASE_=3072 ,SCREAMING_SNAKE_CASE_="gelu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=1E-12 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=256 ,SCREAMING_SNAKE_CASE_=1024 ,SCREAMING_SNAKE_CASE_=216 ,SCREAMING_SNAKE_CASE_=1001 ,SCREAMING_SNAKE_CASE_=32 ,SCREAMING_SNAKE_CASE_=50 ,SCREAMING_SNAKE_CASE_="absolute" ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ ,bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,)
snake_case : Union[str, Any] = vocab_size
snake_case : Any = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Tuple = num_attention_heads
snake_case : Optional[int] = hidden_act
snake_case : Any = intermediate_size
snake_case : Optional[Any] = hidden_dropout_prob
snake_case : str = attention_probs_dropout_prob
snake_case : Optional[Any] = max_position_embeddings
snake_case : Optional[int] = type_vocab_size
snake_case : List[Any] = initializer_range
snake_case : str = layer_norm_eps
snake_case : Union[str, Any] = position_embedding_type
snake_case : Any = use_cache
snake_case : Optional[Any] = classifier_dropout
# additional properties
snake_case : Tuple = max_depth
snake_case : Optional[Any] = max_xpath_tag_unit_embeddings
snake_case : Union[str, Any] = max_xpath_subs_unit_embeddings
snake_case : List[str] = tag_pad_id
snake_case : Dict = subs_pad_id
snake_case : Any = xpath_unit_hidden_size
| 36 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowercase : Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : List[int]
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[Union[int, float]] = None
__lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : str = hans_processors[task]()
snake_case : str = os.path.join(
SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
snake_case : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : List[Any] = label_list[2], label_list[1]
snake_case : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case : Any = cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
snake_case : Union[str, Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
)
logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : Any = hans_processors[task]()
snake_case : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : int = label_list[2], label_list[1]
snake_case : List[str] = label_list
snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case : Any = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE_ ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case_ ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
class _A ( snake_case ):
'''simple docstring'''
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" )
def snake_case_ ( self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = []
for i, line in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
continue
snake_case : Any = """%s-%s""" % (set_type, line[0])
snake_case : Optional[int] = line[5]
snake_case : Union[str, Any] = line[6]
snake_case : Optional[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
snake_case : Dict = line[0]
examples.append(InputExample(guid=SCREAMING_SNAKE_CASE_ ,text_a=SCREAMING_SNAKE_CASE_ ,text_b=SCREAMING_SNAKE_CASE_ ,label=SCREAMING_SNAKE_CASE_ ,pairID=SCREAMING_SNAKE_CASE_ ) )
return examples
def lowercase ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = {label: i for i, label in enumerate(__A )}
snake_case : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
snake_case : Union[str, Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="""max_length""" , truncation=__A , return_overflowing_tokens=__A , )
snake_case : Tuple = label_map[example.label] if example.label in label_map else 0
snake_case : Tuple = int(example.pairID )
features.append(InputFeatures(**__A , label=__A , pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__lowercase : Dict = {
'''hans''': 3,
}
__lowercase : Union[str, Any] = {
'''hans''': HansProcessor,
}
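# Standalone sketch of the RoBERTa label-index swap applied in the dataset classes above
# (hypothetical variable names): positions 1 and 2 of the label list are exchanged before
# the label-to-id map is built.
_labels = ["contradiction", "entailment", "neutral"]
_labels[2], _labels[1] = _labels[1], _labels[2]
_label_map = {label: i for i, label in enumerate(_labels)}
assert _label_map == {"contradiction": 0, "neutral": 1, "entailment": 2}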
| 36 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation using the stochastic sampler of Karras et al.
    ("Elucidating the Design Space of Diffusion-Based Generative Models", referred to as [1]
    in the comments below) on top of a variance-expanding (VE) score model.
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
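
# A minimal sampling sketch (assumptions: the checkpoint name is illustrative, and the
# classes are imported from the installed `diffusers` package rather than this module):
#
#   from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   scheduler = KarrasVeScheduler()
#   pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")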
| 0 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of ``n`` in ascending order.

    >>> prime_factors(12)
    [2, 2, 3]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 1 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Implements the hyperbolic tangent activation function, tanh(x) = 2 / (1 + e^(-2x)) - 1.

    >>> float(tangent_hyperbolic(np.array([0.0]))[0])
    0.0
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 2 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def lowercase ( __A : int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = tf.train.list_variables(__A )
snake_case : Union[str, Any] = {}
snake_case : List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__A , desc="""converting tf checkpoint to dict""" ):
snake_case : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : List[str] = tf.train.load_variable(__A , __A )
snake_case : Optional[Any] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowercase : List[Any] = parser.parse_args()
if args.save_dir is None:
__lowercase : Optional[Any] = Path(args.tf_ckpt_path).parent.name
__lowercase : Union[str, Any] = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
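
# Example invocation (assumption: a locally downloaded TF Pegasus checkpoint for the
# `aeslc` task lives under ./ckpt/aeslc/, and the script file name is illustrative):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc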
| 36 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a
    Chinese-CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
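
# A minimal usage sketch (the checkpoint name and image path are illustrative):
#
#   from transformers import ChineseCLIPProcessor
#   from PIL import Image
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")  # "a cat"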
| 3 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires it; never called here
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
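
# Example invocation (paths and model identifier are illustrative):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa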
| 36 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
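
# A minimal usage sketch: instantiating a default configuration and building a model
# from it (GLPNModel is assumed to be importable from `transformers` alongside this config):
#
#   from transformers import GLPNConfig, GLPNModel
#
#   configuration = GLPNConfig()
#   model = GLPNModel(configuration)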
| 4 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
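
# Example invocation (file names are illustrative):
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json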
| 36 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 5 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
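
# A minimal usage sketch (the checkpoint name and image path are illustrative):
#
#   from transformers import MobileViTImageProcessor
#   from PIL import Image
#
#   image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   inputs = image_processor(images=Image.open("cat.png"), return_tensors="pt")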
| 36 | 0 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 6 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
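
# Example invocation via python-fire (script name, config name, and output directory
# are illustrative; keyword overrides are forwarded to the config):
#
#   python save_randomly_initialized_model.py t5-small ./t5-small-random --num_layers=3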
| 36 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
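
# A minimal usage sketch (the checkpoint name is illustrative; any NLI model with an
# "entailment" entry in its label2id mapping works):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("one day I will see the world", candidate_labels=["travel", "cooking"])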
| 7 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''mobilenet_v1'''
    def __init__( self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict([("""pixel_values""", {0: """batch"""})] )
    @property
    def outputs( self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})] )
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
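# Minimal usage sketch (illustrative; override values are made up): build a
# config for a narrower, smaller-input variant and read back an attribute.
_cfg = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
assert _cfg.image_size == 192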
| 36 | 0 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'''\[([^\]]+)\]''')
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code: str, indent_level: str = "", start_prompt: str = None, end_prompt: str = None) -> list:
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"
    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
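# Illustrative example (input string made up for the demo) of what
# sort_objects_in_import does to a one-line `_import_structure` entry:
#   sort_objects_in_import('    "models": ["ModelB", "CONSTANT_A", "helper_c"],')
#   -> '    "models": ["CONSTANT_A", "ModelB", "helper_c"],'
# Constants sort first, then classes, then functions, each alphabetically with
# underscores ignored.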
def sort_imports(file: str, check_only: bool = True):
    with open(file, 'r') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block_sorted = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block_sorted)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only: bool = True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 8 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
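# Minimal usage sketch (illustrative; the environment dimensions are made up):
# a config for a hypothetical task with an 11-dim state and 3-dim action space.
_dt_cfg = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)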
| 36 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def compute() -> int:
    return compute_nums(1)[0]
def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'{solution() = }')
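# Sanity check on the search above: 9 = 7 + 2*1^2 and 15 = 13 + 2*1^2 satisfy
# Goldbach's other conjecture, so neither is collected; the first odd composite
# with no such decomposition, and hence the value of solution(), is 5777.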
| 9 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
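# Note on the pattern above: each try/except probes an optional backend (torch,
# flax, scipy, torchsde). When the probe raises OptionalDependencyNotAvailable,
# dummy placeholder objects are imported instead, so the missing dependency only
# surfaces as an error when one of these schedulers is actually instantiated.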
| 36 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError('''n must be an integer''')
    if n <= 0:
        raise ValueError('''n must be >= 0''')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    return compute_nums(1)[0]
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'{solution() = }')
| 10 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str) -> None:
    '''simple docstring'''
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = f"""{src_lang}-{tgt_lang}"""
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True )
    path = os.path.join(model_card_dir, """README.md""" )
    print(f"""Generating {path}""" )
    with open(path, """w""", encoding="""utf-8""" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36 | 0 |
'''simple docstring'''
def solution(n = 600_851_475_143):
    """simple docstring"""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F"""{solution() = }""")
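# Worked check against the known Project Euler #3 result:
# 600_851_475_143 = 71 * 839 * 1471 * 6857, so solution() returns 6857,
# the largest of the prime factors left standing after repeated division.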
| 11 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
_CITATION = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Value("""string""" , id="""sequence"""),
}) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
    def _download_and_prepare(self, dl_manager):
        '''simple docstring'''
        import nltk
        nltk.download("""wordnet""")
        if NLTK_VERSION >= version.Version("""3.6.5"""):
            nltk.download("""punkt""")
        if NLTK_VERSION >= version.Version("""3.6.6"""):
            nltk.download("""omw-1.4""")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        '''simple docstring'''
        if NLTK_VERSION >= version.Version("""3.6.5"""):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 12 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class SageMakerTrainer( Trainer ):
    '''simple docstring'''
    def __init__( self, args=None, **kwargs ):
        '''simple docstring'''
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""", FutureWarning, )
        super().__init__(args=args, **kwargs )
| 36 | 0 |
'''simple docstring'''
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    octets = [int(i) for i in ip_va_address.split('.') if i.isdigit()]
    # A valid IPv4 address needs exactly four octets, each in [0, 255].
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = """valid""" if is_ip_va_address_valid(ip) else """invalid"""
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
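# Illustrative checks: is_ip_va_address_valid('192.168.0.23') -> True, while
# '1.2.3' (too few octets) and '300.1.2.3' (octet out of range) -> False.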
| 13 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['''text''', '''image''', '''audio''']
def create_inputs(input_types: List[str]) -> List:
    '''simple docstring'''
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type, list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs
def output_types(outputs: List) -> List[str]:
    '''simple docstring'''
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText) ):
            output_types.append("""text""" )
        elif isinstance(output, (Image.Image, AgentImage) ):
            output_types.append("""image""" )
        elif isinstance(output, (torch.Tensor, AgentAudio) ):
            output_types.append("""audio""" )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class _A :
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
snake_case : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input ,SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : str = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : Union[str, Any] = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) ,self.tool.outputs )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.outputs ):
snake_case : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
| 36 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
feature_extractor = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def init_clap(checkpoint_path, enable_fusion=False):
    """simple docstring"""
    model, model_cfg = create_model(
        '''HTSAT-tiny''', '''roberta''', checkpoint_path, precision='''fp32''', device='''cuda:0''' if torch.cuda.is_available() else '''cpu''', enable_fusion=enable_fusion, fusion_type='''aff_2d''' if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict(state_dict):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = R'''.*sequential.(\d+).*'''
    text_projection_pattern = R'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key )
        if re.match(sequential_layers_pattern, key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key ).group(1 )
            key = key.replace(F"""sequential.{sequential_layer}.""", F"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern, key ):
            projecton_layer = int(re.match(text_projection_pattern, key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F"""_projection.{projecton_layer}.""", F"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query" )] = query_layer
            model_state_dict[key.replace("qkv", "key" )] = key_layer
            model_state_dict[key.replace("qkv", "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
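# Illustrative trace of the renaming above on one hypothetical checkpoint key:
#   "text_branch.encoder.layer.0.attn.self.proj.weight"
#   -> "text_model.encoder.layer.0.attention.self.self.proj.weight"  (text_branch, attn)
#   -> "text_model.encoder.layer.0.attention.self.output.dense.weight"  (self.proj)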
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """simple docstring"""
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 14 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""", ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    '''simple docstring'''
    inspect_dataset(path, tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""", ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    '''simple docstring'''
    inspect_metric(path, tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    """path, config_name, expected_splits""", [
        ("""squad""", """plain_text""", ["""train""", """validation"""]),
        ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
        ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
    ], )
def test_get_dataset_config_info(path, config_name, expected_splits):
    '''simple docstring'''
    info = get_dataset_config_info(path, config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    """path, config_name, expected_exception""", [
        ("""paws""", None, ValueError),
    ], )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path, config_name=config_name )
@pytest.mark.parametrize(
    """path, expected""", [
        ("""squad""", """plain_text"""),
        ("""acronym_identification""", """default"""),
        ("""lhoestq/squad""", """plain_text"""),
        ("""lhoestq/test""", """default"""),
        ("""lhoestq/demo1""", """lhoestq--demo1"""),
        ("""dalle-mini/wit""", """dalle-mini--wit"""),
    ], )
def test_get_dataset_config_names(path, expected):
    '''simple docstring'''
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    """path, expected_configs, expected_splits_in_first_config""", [
        ("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
        ("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
        ("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
    ], )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    '''simple docstring'''
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    """path, expected_config, expected_splits""", [
        ("""squad""", """plain_text""", ["""train""", """validation"""]),
        ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
        ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
    ], )
def test_get_dataset_infos_expected_splits(path, expected_config, expected_splits):
    '''simple docstring'''
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    """path, config_name, expected_exception""", [
        ("""paws""", None, ValueError),
    ], )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path, config_name=config_name )
| 36 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever( RagRetriever ):
    '''simple docstring'''
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None ) -> None:
        """simple docstring"""
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None
    def init_retrieval(self, distributed_port: int ):
        """simple docstring"""
        logger.info("""initializing retrieval""" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None, backend="""gloo""" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""" )
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )
    def _is_main(self ) -> bool:
        """simple docstring"""
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32 ):
        """simple docstring"""
        target_tensor = torch.empty(target_shape, dtype=target_type )
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group )
        return target_tensor
    def _infer_socket_ifname(self ):
        """simple docstring"""
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""" )), None )
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int ) -> Tuple[np.ndarray, List[dict]]:
        """simple docstring"""
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ), dst=0, gather_list=gather_list, group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list ).numpy(), n_docs )
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids ), torch.tensor(retrieved_doc_embeds )
            scatter_ids = self._chunk_tensor(doc_ids, n_queries )
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries )
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
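# Flow summary for retrieve() above: every worker sends its query batch to the
# main worker (gather), the main worker queries the index once for the combined
# batch, then each worker receives back only its own slice of document ids and
# embeddings (scatter), keeping the index loaded in a single process.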
| 15 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''albert'''
    def __init__( self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
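# Minimal usage sketch (illustrative): the defaults above correspond to the
# xxlarge-sized architecture; a base-sized variant would override, e.g.:
_base_cfg = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)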
| 36 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 5_1_2,
}
class JukeboxTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, artists_file, genres_file, lyrics_file, version=["v3", "v2", "v2"], max_n_lyric_tokens=512, n_genres=5, unk_token="<|endoftext|>", **kwargs, ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str ) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs, )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8" ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file, encoding="utf-8" ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file, encoding="utf-8" ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 79:
            oov = oov.replace(r"\-'", r"\-+'" )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size( self ):
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
    def get_vocab( self ):
        # Merge the three encoders into a single mapping.
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder )
    def _convert_token_to_id( self, list_artists, list_genres, list_lyrics ):
        artists_id = [self.artists_encoder.get(artist, 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre, 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character, 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize( self, lyrics ):
        return list(lyrics )
    def tokenize( self, artist, genre, lyrics, **kwargs ):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False ):
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + ".v2"
                genres[idx] = [
                    self._normalize(genre ) + ".v2" for genre in genres[idx].split("_" )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
        lyrics = self._run_strip_accents(lyrics )
        lyrics = lyrics.replace("\\", "\n" )
        lyrics = self.out_of_vocab.sub("", lyrics ), [], []
        return artists, genres, lyrics
    def _run_strip_accents( self, text ):
        text = unicodedata.normalize("NFD", text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def _normalize( self, text: str ):
        accepted = (
            [chr(i ) for i in range(ord("a" ), ord("z" ) + 1 )]
            + [chr(i ) for i in range(ord("A" ), ord("Z" ) + 1 )]
            + [chr(i ) for i in range(ord("0" ), ord("9" ) + 1 )]
            + ["."]
        )
        accepted = frozenset(accepted )
        pattern = re.compile(r"_+" )
        text = "".join([c if c in accepted else "_" for c in text.lower()] )
        text = pattern.sub("_", text ).strip("_" )
        return text
    def convert_lyric_tokens_to_string( self, lyrics ) -> str:
        return " ".join(lyrics )
    def convert_to_tensors( self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
        return inputs
    def __call__( self, artist, genres, lyrics="", return_tensors="pt" ) -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version )
        genres = [genres] * len(self.version )
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics )
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens )
        attention_masks = [-INFINITY] * len(full_tokens[-1] )
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
def _snake_case ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__lowerCamelCase ) )
return (artists_file, genres_file, lyrics_file)
def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
    artist = self.artists_decoder.get(artists_index)
    genres = [self.genres_decoder.get(genre) for genre in genres_index]
    lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
    return artist, genres, lyrics
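# Hedged usage sketch for the tokenizer methods above (assumes this class is the
# Jukebox-style tokenizer and that the "openai/jukebox-1b-lyrics" checkpoint is
# available; both names are assumptions, not confirmed by this snippet):
#
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   encoding = tokenizer("Alan Jackson", "Country Rock", lyrics="old town road")
#   print(encoding["input_ids"][0].shape)  # one tensor per model prior in self.version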
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Find the median of the merged contents of two arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature="default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
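# Hedged usage sketch (assumes the classes above are exported as in transformers'
# vision-encoder-decoder module and that ViT/BERT configs are importable; those
# imports are assumptions, not part of this file):
#
#   from transformers import ViTConfig, BertConfig
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention
#   round_trip = config.to_dict()  # nested encoder/decoder dicts plus model_type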
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not"
                    " exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path"
                    " to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}`"
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
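# Hedged usage sketch (assumes the processor above matches transformers'
# BarkProcessor and that the "suno/bark-small" checkpoint ships v2 voice presets;
# both are assumptions outside this file):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # inputs["history_prompt"] now holds the semantic/coarse/fine prompt arrays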
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
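# Example invocation (hedged: assumes this script is saved as
# convert_bit_to_pytorch.py and that timm can download the named checkpoint,
# which is the script's own default):
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50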
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_a = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_a = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
_a = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
_a = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
_a = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular per-candidate results."""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
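# Worked example for the unbiased pass@k estimator above: with n candidates and c
# correct ones, pass@k = 1 - prod_{i=n-c+1}^{n} (1 - k/i). For n=2, c=1, k=1 this
# gives 1 - (1 - 1/2) = 0.5, matching the docstring's pass@1 == 0.5. Quick check:
#
#   >>> import numpy as np
#   >>> float(1.0 - np.prod(1.0 - 1 / np.arange(2, 3)))
#   0.5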
import os
import pytest
from attr import dataclass
DEFAULT_REGION = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
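# Hedged usage sketch: a test class consumes the fixture above roughly like this
# (the fixture name `sm_env` follows the rewrite here; the test-class layout is an
# assumption mirroring transformers' SageMaker tests):
#
#   @pytest.mark.usefixtures("sm_env")
#   class SingleNodeTest(unittest.TestCase):
#       framework = "pytorch"
#
#       def test_env(self):
#           assert self.env.base_job_name == "pytorch-transformers-test"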
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
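# Hedged usage sketch: with the _LazyModule indirection above, importing the config
# symbol is cheap, while touching a modeling symbol triggers the torch-backed
# submodule import on first attribute access (assumes the usual transformers
# package layout for this module):
#
#   from transformers.models.table_transformer import TableTransformerConfig  # light
#   from transformers.models.table_transformer import TableTransformerModel   # loads torch code lazily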
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a list of ``InputExample`` into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
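# Hedged usage sketch (assumes the HANS tsv files have been downloaded to
# ./hans-data; the tokenizer checkpoint below is an example, not prescribed by
# this file):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = HansDataset("./hans-data", tokenizer, task="hans", max_seq_length=128)
#   print(len(dataset), dataset.get_labels())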
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of n in ascending order, with multiplicity.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(37)
    [37]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
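# Worked example for the trial-division loop above: 100 = 2*2*5*5, so each division
# by 2 and then 5 appends a factor, and the trailing "if n > 1" catches a leftover
# prime cofactor (e.g. prime_factors(2 * 3 * 5 * 7) == [2, 3, 5, 7]). The loop runs
# in O(sqrt(n)) divisions, which is fine for 64-bit inputs but not for
# cryptographic sizes.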
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
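# Hedged usage sketch (assumes the classes above mirror transformers'
# XLMRobertaConfig / OnnxConfig API, where OnnxConfig takes a model config and a
# task name):
#
#   config = XLMRobertaConfig()  # defaults match the base checkpoint's shape
#   onnx_config = XLMRobertaOnnxConfig(config, task="multiple-choice")
#   print(onnx_config.inputs)  # input_ids/attention_mask gain the extra "choice" axis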
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Implements tanh via its logistic form: tanh(x) = 2*sigmoid(2x) - 1.

    >>> tangent_hyperbolic(np.array([0]))
    array([0.])
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
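# Sanity-check sketch: the expression above is algebraically tanh(x), so it should
# agree with numpy's built-in implementation:
#
#   >>> v = np.array([-1.0, 0.0, 1.0])
#   >>> bool(np.allclose(tangent_hyperbolic(v), np.tanh(v)))
#   True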
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> Optional[Any]:
'''simple docstring'''
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] )-> int:
'''simple docstring'''
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__snake_case = features.copy() if features else default_expected_features
__snake_case = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__snake_case = features.copy() if features else default_expected_features
__snake_case = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] )-> Any:
'''simple docstring'''
__snake_case = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__snake_case = features.copy()
__snake_case = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case = tmp_path / '''cache'''
__snake_case = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The requested split name should be attached to the resulting Dataset."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == str(split) if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """Both a single path and a list of paths are accepted."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for DatasetDict objects read from JSON (referenced by the tests below)."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """keep_in_memory=True should grow Arrow memory; False should not."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """Features should propagate to every split of the DatasetDict."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Explicit split mappings should be preserved; None defaults to train/test."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Parse the buffer as a single JSON document (name referenced in the tests below)."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Parse the buffer as JSON Lines: one JSON object per line."""
    return [json.loads(line) for line in buffer]
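# Illustrative example of the two loaders: given the bytes
#   b'{"col_1": "a"}\n{"col_1": "b"}\n'
# load_json_lines returns [{"col_1": "a"}, {"col_1": "b"}], whereas load_json
# expects the whole buffer to be one JSON document and would raise on that input.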
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_json_file = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_json_file, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
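# Minimal round-trip sketch (illustrative; assumes a `datasets.Dataset` instance
# named `dataset` and the private JsonDatasetReader/JsonDatasetWriter helpers
# exercised above):
#   with io.BytesIO() as buffer:
#       JsonDatasetWriter(dataset, buffer, lines=True).write()
#       buffer.seek(0)
#       rows = load_json_lines(buffer)  # list of dicts, one per dataset row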
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
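# Illustrative rename: applying the patterns above in order maps a TF key such as
# 'encoder/memory_attention/output_proj/kernel' to
# 'encoder.encoder_attn.out_proj.weight' (real checkpoint keys may differ).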
def rename_state_dict_key(k: str) -> str:
    """Map a TF Pegasus weight name onto the corresponding Hugging Face key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Read every variable from a TF checkpoint into a {name: np.ndarray} dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """Convert a task-specific Pegasus TF checkpoint and save tokenizer + model."""
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
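# Example invocation (script name and paths are illustrative):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc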
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Swin Transformer model."""

    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
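    # With the defaults above (embed_dim=96, four stages), the derived hidden_size
    # is 96 * 2**3 = 768, i.e. the channel width after the last stage of Swin-Tiny.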
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
    @property
    def atol_for_validation(self) -> float:
        # absolute tolerance used when validating the ONNX export against PyTorch
        return 1e-4
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only the bits needed to load the checkpoint
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Transfer QA weights from a PyTorch Lightning checkpoint into LongformerForQuestionAnswering."""
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
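        # Sanity of the math above: softmax(logits / T) with T < 1 sharpens the
        # distribution (max prob rises, min falls), while T > 1 flattens it,
        # which is exactly what these assertions check on the peaked second row.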
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : int = None
__snake_case : List[str] = 10
__snake_case : List[Any] = 2
# create ramp distribution
__snake_case : Optional[Any] = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, vocab_size) ).copy()
__snake_case : Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
__snake_case : List[str] = FlaxTopKLogitsWarper(3 )
__snake_case : Dict = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__snake_case : Any = 5
__snake_case : Dict = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__snake_case : Any = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, length) ).copy()
__snake_case : List[str] = top_k_warp_safety_check(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = None
__snake_case : Any = 10
__snake_case : List[str] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__snake_case : Any = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__snake_case : Any = FlaxTopPLogitsWarper(0.8 )
__snake_case : Tuple = np.exp(top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__snake_case : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__snake_case : Optional[Any] = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__snake_case : Optional[int] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__snake_case : Union[str, Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__snake_case : Union[str, Any] = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = 20
__snake_case : Tuple = 4
__snake_case : List[Any] = 0
__snake_case : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
# check that min length is applied at length 5
__snake_case : Union[str, Any] = ids_tensor((batch_size, 20) , vocab_size=20 )
__snake_case : int = 5
__snake_case : Dict = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Any = min_dist_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__snake_case : str = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = 15
__snake_case : str = min_dist_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertFalse(jnp.isinf(__magic_name__ ).any() )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Dict = 20
__snake_case : List[str] = 4
__snake_case : Tuple = 0
__snake_case : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
# check that all scores are -inf except the bos_token_id score
__snake_case : List[Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
__snake_case : Optional[int] = 1
__snake_case : Tuple = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : List[str] = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__snake_case : Any = 3
__snake_case : Union[str, Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Any = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertFalse(jnp.isinf(__magic_name__ ).any() )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
__snake_case : List[Any] = 20
__snake_case : Union[str, Any] = 4
__snake_case : Dict = 0
__snake_case : Optional[int] = 5
__snake_case : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__snake_case : Optional[int] = ids_tensor((batch_size, 4) , vocab_size=20 )
__snake_case : int = 4
__snake_case : Any = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Dict = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__snake_case : Optional[Any] = 3
__snake_case : Optional[Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Any = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertFalse(jnp.isinf(__magic_name__ ).any() )
def lowercase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = 4
__snake_case : int = 10
__snake_case : List[Any] = 15
__snake_case : Union[str, Any] = 2
__snake_case : List[str] = 1
__snake_case : Optional[Any] = 15
# dummy input_ids and scores
__snake_case : Dict = ids_tensor((batch_size, sequence_length) , __magic_name__ )
__snake_case : Any = input_ids.copy()
__snake_case : List[Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : str = scores.copy()
# instantiate all dist processors
__snake_case : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : Union[str, Any] = FlaxTopKLogitsWarper(3 )
__snake_case : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__snake_case : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
__snake_case : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
__snake_case : str = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
__snake_case : Tuple = 10
# no processor list
__snake_case : Dict = temp_dist_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Any = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : str = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Optional[int] = min_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Optional[Any] = bos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Optional[Any] = eos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# with processor list
__snake_case : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__snake_case : int = processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# scores should be equal
self.assertTrue(jnp.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = 4
__snake_case : Optional[int] = 10
__snake_case : Optional[Any] = 15
__snake_case : Dict = 2
__snake_case : Tuple = 1
__snake_case : Optional[int] = 15
# dummy input_ids and scores
__snake_case : List[Any] = ids_tensor((batch_size, sequence_length) , __magic_name__ )
__snake_case : str = input_ids.copy()
__snake_case : str = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : int = scores.copy()
# instantiate all dist processors
__snake_case : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : Optional[Any] = FlaxTopKLogitsWarper(3 )
__snake_case : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__snake_case : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
__snake_case : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
__snake_case : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
__snake_case : Dict = 10
# no processor list
def run_no_processor_list(__magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : int ):
__snake_case : Union[str, Any] = temp_dist_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Tuple = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : List[Any] = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Union[str, Any] = min_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : List[Any] = bos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : List[str] = eos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
return scores
# with processor list
def run_processor_list(__magic_name__ : int , __magic_name__ : Any , __magic_name__ : Dict ):
__snake_case : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__snake_case : Optional[Any] = processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
return scores
__snake_case : int = jax.jit(__magic_name__ )
__snake_case : List[Any] = jax.jit(__magic_name__ )
__snake_case : List[str] = jitted_run_no_processor_list(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : Tuple = jitted_run_processor_list(__magic_name__ , __magic_name__ , __magic_name__ )
# scores should be equal
self.assertTrue(jnp.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
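# Worked example: gold "the cat sat" and prediction "cat sat on" normalize to
# tokens ['cat', 'sat'] and ['cat', 'sat', 'on']; num_same = 2, so
# precision = 2/3, recall = 2/2 = 1.0, and F1 = 2*(2/3 * 1)/(2/3 + 1) = 0.8.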
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
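# The sweep starts from the "predict no-answer for everything" score and walks
# questions in increasing order of no-answer probability, crediting +score for
# answerable questions and -1 for wrong non-empty predictions, keeping the
# threshold that maximizes the running total.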
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
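    # Note: vocab_size defaults to 512 + 1 because ImageGPT quantizes pixels into
    # 512 colour clusters plus one start-of-sequence token, and n_positions of
    # 32 * 32 corresponds to a 32x32 pixel grid.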
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=32,
        image_height=32,
    ):
        input_images = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_images, return_tensors=framework))
        return inputs
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None):
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_center_crop=None,
        crop_size=None,
        do_flip_channel_order=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized (untrained) model plus tokenizer for a given config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
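# Example CLI usage via python-fire (script name and arguments are illustrative;
# fire maps positional args and --flags onto the function's parameters):
#   python save_randomly_initialized.py t5-small ./t5-small-random --d_model=64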
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class __lowerCamelCase ( lowerCAmelCase ):
a__: GlueDataTrainingArguments
a__: str
a__: List[InputFeatures]
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = Split.train , UpperCAmelCase = None , ):
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , UpperCAmelCase , )
lowerCamelCase_ = args
lowerCamelCase_ = glue_processors[args.task_name]()
lowerCamelCase_ = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" , )
lowerCamelCase_ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase_ , lowerCamelCase_ = label_list[2], label_list[1]
lowerCamelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + '''.lock'''
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(UpperCAmelCase )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
else:
logger.info(f"Creating features from dataset file at {args.data_dir}" )
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase_ = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase_ = examples[:limit_length]
lowerCamelCase_ = glue_convert_examples_to_features(
UpperCAmelCase , UpperCAmelCase , max_length=args.max_seq_length , label_list=UpperCAmelCase , output_mode=self.output_mode , )
lowerCamelCase_ = time.time()
torch.save(self.features , UpperCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self ):
return len(self.features )
def __getitem__( self , UpperCAmelCase ):
return self.features[i]
def UpperCAmelCase__ ( self ):
return self.label_list
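# Minimal usage sketch (hedged): this dump rewrote the class names above; in
# transformers they are GlueDataTrainingArguments and GlueDataset (both deprecated
# in favor of 🤗 Datasets). "glue_data/MRPC" is a placeholder data directory.
if __name__ == "__main__":
    from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="glue_data/MRPC", max_seq_length=128)
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")  # str mode resolves via Split[mode]
    print(len(train_dataset), train_dataset.get_labels())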
| 29 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = '''mobilenet_v1'''
def __init__( self ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=224 ,SCREAMING_SNAKE_CASE_=1.0 ,SCREAMING_SNAKE_CASE_=8 ,SCREAMING_SNAKE_CASE_="relu6" ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=0.9_99 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=0.0_01 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
snake_case : List[Any] = num_channels
snake_case : str = image_size
snake_case : List[Any] = depth_multiplier
snake_case : Optional[int] = min_depth
snake_case : Union[str, Any] = hidden_act
snake_case : int = tf_padding
snake_case : Optional[int] = classifier_dropout_prob
snake_case : Tuple = initializer_range
snake_case : List[str] = layer_norm_eps
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
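# Minimal usage sketch (hedged): the two classes above are MobileNetV1Config and
# MobileNetV1OnnxConfig in transformers; the import path for the ONNX config is an
# assumption based on the module layout.
if __name__ == "__main__":
    from transformers import MobileNetV1Config
    from transformers.models.mobilenet_v1.configuration_mobilenet_v1 import MobileNetV1OnnxConfig

    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
    print(onnx_config.inputs)   # OrderedDict([('pixel_values', {0: 'batch'})])
    print(onnx_config.outputs)  # OrderedDict([('logits', {0: 'batch'})])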
| 36 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase_ : List[str] = model_name.find('''patch''' )
UpperCAmelCase_ : Optional[int] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
UpperCAmelCase_ : List[str] = XCLIPVisionConfig(patch_size=_lowercase , num_frames=_lowercase )
if "large" in model_name:
UpperCAmelCase_ : Optional[int] = 768
UpperCAmelCase_ : Dict = 3072
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : str = 1024
UpperCAmelCase_ : Any = 4096
UpperCAmelCase_ : Union[str, Any] = 16
UpperCAmelCase_ : Optional[Any] = 24
UpperCAmelCase_ : List[Any] = 768
UpperCAmelCase_ : Dict = 3072
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : int = 336
UpperCAmelCase_ : List[Any] = XCLIPConfig.from_text_vision_configs(_lowercase , _lowercase )
if "large" in model_name:
UpperCAmelCase_ : str = 768
return config
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if name == "token_embedding.weight":
UpperCAmelCase_ : Optional[int] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
UpperCAmelCase_ : Any = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
UpperCAmelCase_ : Any = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
UpperCAmelCase_ : List[str] = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
UpperCAmelCase_ : List[Any] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
UpperCAmelCase_ : Any = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
UpperCAmelCase_ : int = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase_ : str = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
UpperCAmelCase_ : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase_ : List[str] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
UpperCAmelCase_ : Optional[Any] = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
UpperCAmelCase_ : Union[str, Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
UpperCAmelCase_ : Dict = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
UpperCAmelCase_ : List[Any] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
UpperCAmelCase_ : Optional[int] = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
UpperCAmelCase_ : str = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
UpperCAmelCase_ : Optional[int] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase_ : List[str] = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
UpperCAmelCase_ : List[str] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase_ : int = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
UpperCAmelCase_ : Optional[int] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
UpperCAmelCase_ : Optional[int] = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
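# Worked example (hedged): "rename_key" is the name used at the call sites below;
# the def above was rewritten by this dump. For one text-encoder key the rules fire
# in source order, the "ln_1" rule first, then the "transformer.resblocks" prefix:
#
#   rename_key("transformer.resblocks.0.ln_1.weight")
#   -> "transformer.resblocks.0.layer_norm1.weight"
#   -> "text_model.encoder.layers.0.layer_norm1.weight"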
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : Any = orig_state_dict.pop(_lowercase )
if "attn.in_proj" in key:
UpperCAmelCase_ : Optional[int] = key.split('''.''' )
if key.startswith('''visual''' ):
UpperCAmelCase_ : List[Any] = key_split[3]
UpperCAmelCase_ : Optional[int] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase_ : List[str] = val[
:dim, :
]
UpperCAmelCase_ : Optional[int] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : Optional[int] = val[
-dim:, :
]
else:
UpperCAmelCase_ : List[Any] = val[
:dim
]
UpperCAmelCase_ : Any = val[
dim : dim * 2
]
UpperCAmelCase_ : List[str] = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase_ : Optional[int] = val[
:dim, :
]
UpperCAmelCase_ : List[Any] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : List[str] = val[
-dim:, :
]
else:
UpperCAmelCase_ : List[Any] = val[:dim]
UpperCAmelCase_ : List[Any] = val[
dim : dim * 2
]
UpperCAmelCase_ : Optional[Any] = val[-dim:]
elif key.startswith('''mit''' ):
UpperCAmelCase_ : Optional[Any] = key_split[2]
UpperCAmelCase_ : Tuple = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase_ : Optional[Any] = val[:dim, :]
UpperCAmelCase_ : Optional[int] = val[dim : dim * 2, :]
UpperCAmelCase_ : Optional[int] = val[-dim:, :]
else:
UpperCAmelCase_ : List[str] = val[:dim]
UpperCAmelCase_ : List[Any] = val[dim : dim * 2]
UpperCAmelCase_ : int = val[-dim:]
else:
UpperCAmelCase_ : List[str] = key_split[2]
UpperCAmelCase_ : List[str] = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ : Tuple = val[:dim, :]
UpperCAmelCase_ : Any = val[
dim : dim * 2, :
]
UpperCAmelCase_ : str = val[-dim:, :]
else:
UpperCAmelCase_ : Optional[int] = val[:dim]
UpperCAmelCase_ : Tuple = val[
dim : dim * 2
]
UpperCAmelCase_ : List[Any] = val[-dim:]
else:
UpperCAmelCase_ : int = rename_key(_lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase_ : Dict = val.T
UpperCAmelCase_ : Tuple = val
return orig_state_dict
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if num_frames == 8:
UpperCAmelCase_ : Tuple = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
UpperCAmelCase_ : Union[str, Any] = '''eating_spaghetti.npy'''
elif num_frames == 32:
UpperCAmelCase_ : Optional[Any] = '''eating_spaghetti_32_frames.npy'''
UpperCAmelCase_ : List[Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=_lowercase , repo_type='''dataset''' , )
UpperCAmelCase_ : Union[str, Any] = np.load(_lowercase )
return list(_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase=None , _lowercase=False ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
UpperCAmelCase_ : Optional[int] = model_to_url[model_name]
UpperCAmelCase_ : Dict = 8
if "16-frames" in model_name:
UpperCAmelCase_ : List[Any] = 16
elif "shot" in model_name:
UpperCAmelCase_ : Tuple = 32
UpperCAmelCase_ : Any = get_xclip_config(_lowercase , _lowercase )
UpperCAmelCase_ : str = XCLIPModel(_lowercase )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase_ : int = '''pytorch_model.bin'''
gdown.cached_download(_lowercase , _lowercase , quiet=_lowercase )
UpperCAmelCase_ : Union[str, Any] = torch.load(_lowercase , map_location='''cpu''' )['''model''']
else:
UpperCAmelCase_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowercase )['''model''']
UpperCAmelCase_ : Dict = convert_state_dict(_lowercase , _lowercase )
UpperCAmelCase_ : int = XCLIPModel(_lowercase )
UpperCAmelCase_, UpperCAmelCase_ : List[Any] = model.load_state_dict(_lowercase , strict=_lowercase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase_ : Optional[Any] = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
UpperCAmelCase_ : List[str] = VideoMAEImageProcessor(size=_lowercase )
UpperCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
UpperCAmelCase_ : Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
UpperCAmelCase_ : List[Any] = XCLIPProcessor(image_processor=_lowercase , tokenizer=_lowercase )
UpperCAmelCase_ : Dict = prepare_video(_lowercase )
UpperCAmelCase_ : List[str] = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=_lowercase , return_tensors='''pt''' , padding=_lowercase )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase_ : str = model(**_lowercase )
# Verify outputs
UpperCAmelCase_ : Dict = outputs.logits_per_video
UpperCAmelCase_ : Union[str, Any] = logits_per_video.softmax(dim=1 )
print('''Probs:''' , _lowercase )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase_ : Tuple = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase_ : Any = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase_ : Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase_ : str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase_ : Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase_ : List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase_ : List[Any] = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase_ : str = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase_ : Dict = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase_ : Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(_lowercase , organization='''nielsr''' )
processor.push_to_hub(_lowercase , organization='''nielsr''' )
slow_tokenizer.push_to_hub(_lowercase , organization='''nielsr''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__a = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 30 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''decision_transformer'''
__lowerCamelCase : Optional[Any] = ['''past_key_values''']
__lowerCamelCase : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,SCREAMING_SNAKE_CASE_=17 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=1024 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_="relu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=1E-5 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Any = state_dim
snake_case : Optional[Any] = act_dim
snake_case : Union[str, Any] = hidden_size
snake_case : Any = max_ep_len
snake_case : int = action_tanh
snake_case : Any = vocab_size
snake_case : Any = n_positions
snake_case : List[str] = n_layer
snake_case : int = n_head
snake_case : Optional[int] = n_inner
snake_case : List[Any] = activation_function
snake_case : Tuple = resid_pdrop
snake_case : Optional[Any] = embd_pdrop
snake_case : Dict = attn_pdrop
snake_case : List[str] = layer_norm_epsilon
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[Any] = scale_attn_weights
snake_case : str = use_cache
snake_case : int = scale_attn_by_inverse_layer_idx
snake_case : Tuple = reorder_and_upcast_attn
snake_case : Tuple = bos_token_id
snake_case : List[str] = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
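# Minimal usage sketch (hedged): the class above is DecisionTransformerConfig in
# transformers; the attribute_map lets GPT-2-style names resolve to the n_* fields.
if __name__ == "__main__":
    from transformers import DecisionTransformerConfig

    config = DecisionTransformerConfig(state_dim=17, act_dim=6, hidden_size=128)
    print(config.num_attention_heads)      # 1, aliased to config.n_head
    print(config.max_position_embeddings)  # 1024, aliased to config.n_positions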
| 36 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int ) -> Dict:
SCREAMING_SNAKE_CASE_ = 0
if start < end:
SCREAMING_SNAKE_CASE_ = randint(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = a[end]
SCREAMING_SNAKE_CASE_ = a[pivot]
SCREAMING_SNAKE_CASE_ = temp
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _in_place_partition(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
count += _in_place_quick_sort(__UpperCAmelCase , __UpperCAmelCase , p - 1 )
count += _in_place_quick_sort(__UpperCAmelCase , p + 1 , __UpperCAmelCase )
return count
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = randint(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = a[end]
SCREAMING_SNAKE_CASE_ = a[pivot]
SCREAMING_SNAKE_CASE_ = temp
SCREAMING_SNAKE_CASE_ = start - 1
for index in range(__UpperCAmelCase , __UpperCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
SCREAMING_SNAKE_CASE_ = new_pivot_index + 1
SCREAMING_SNAKE_CASE_ = a[new_pivot_index]
SCREAMING_SNAKE_CASE_ = a[index]
SCREAMING_SNAKE_CASE_ = temp
SCREAMING_SNAKE_CASE_ = a[new_pivot_index + 1]
SCREAMING_SNAKE_CASE_ = a[end]
SCREAMING_SNAKE_CASE_ = temp
return new_pivot_index + 1, count
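# Usage trace (hedged): "_in_place_quick_sort" and "_in_place_partition" are the
# names used at the recursive call sites above; the dump rewrote both defs. On a
# small list the sort happens in place and the return value counts comparisons:
#
#   data = [5, 2, 4, 1, 3]
#   comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#   # data == [1, 2, 3, 4, 5]; comparisons varies with the random pivots,
#   # averaging O(n log n) and degrading to O(n^2) in the worst case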
lowerCamelCase__ : List[Any] = TemporaryFile()
lowerCamelCase__ : Optional[Any] = 100 # 100 elements are to be sorted
lowerCamelCase__ , lowerCamelCase__ : List[str] = 0, 1 # mean and standard deviation
lowerCamelCase__ : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
lowerCamelCase__ : Union[str, Any] = np.load(outfile)
lowerCamelCase__ : Union[str, Any] = len(M) - 1
lowerCamelCase__ : Dict = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
    'is :'
)
print(z)
| 31 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
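# Minimal usage sketch (hedged): with torch installed the guards above expose the
# real schedulers; without it, the dummy stand-ins raise a backend-missing error
# only when instantiated, so importing the package itself still succeeds.
if __name__ == "__main__":
    from diffusers.schedulers import DDIMScheduler

    scheduler = DDIMScheduler(num_train_timesteps=1_000)
    print(type(scheduler).__name__)  # DDIMScheduler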
| 36 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
"""simple docstring"""
_UpperCAmelCase = MobileBertConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_UpperCAmelCase = MobileBertForPreTraining(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
_UpperCAmelCase = load_tf_weights_in_mobilebert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 32 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str] ) -> Any:
'''simple docstring'''
snake_case : Tuple = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case : Optional[Any] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case : Optional[int] = f"""{src_lang}-{tgt_lang}"""
snake_case : Any = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle inputs with repeated sub-phrases well: [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(__A , exist_ok=__A )
snake_case : Union[str, Any] = os.path.join(__A , """README.md""" )
print(f"""Generating {path}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(__A )
# make sure we are under the root of the project
__lowercase : int = Path(__file__).resolve().parent.parent.parent
__lowercase : List[str] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowercase , __lowercase , __lowercase : List[str] = model_name.split('''-''')
__lowercase : str = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = 'vivit'
def __init__( self:Tuple , _a:Any=2_24 , _a:Tuple=32 , _a:Optional[Any]=[2, 16, 16] , _a:str=3 , _a:Union[str, Any]=7_68 , _a:str=12 , _a:str=12 , _a:str=30_72 , _a:str="gelu_fast" , _a:Any=0.0 , _a:int=0.0 , _a:str=0.02 , _a:List[str]=1e-06 , _a:int=True , **_a:int , ):
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = image_size
snake_case__ = num_frames
snake_case__ = tubelet_size
snake_case__ = num_channels
snake_case__ = qkv_bias
super().__init__(**_a )
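# Minimal usage sketch (hedged): the class above is VivitConfig in transformers;
# num_frames and tubelet_size are the video-specific fields on top of the usual
# ViT-style options.
if __name__ == "__main__":
    from transformers import VivitConfig

    config = VivitConfig(image_size=224, num_frames=32, tubelet_size=[2, 16, 16])
    print(config.num_frames, config.tubelet_size)  # 32 [2, 16, 16]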
| 33 |
__lowercase : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowercase : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowercase : List[str] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('fixtures/test_sentencepiece.model')
SCREAMING_SNAKE_CASE_ = {'target_lang': 'fi', 'source_lang': 'en'}
SCREAMING_SNAKE_CASE_ = '>>zh<<'
SCREAMING_SNAKE_CASE_ = 'Helsinki-NLP/'
if is_torch_available():
SCREAMING_SNAKE_CASE_ = 'pt'
elif is_tf_available():
SCREAMING_SNAKE_CASE_ = 'tf'
else:
SCREAMING_SNAKE_CASE_ = 'jax'
@require_sentencepiece
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = MarianTokenizer
A_ = False
A_ = True
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().setUp()
UpperCamelCase = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = Path(self.tmpdirname)
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''vocab'''])
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''])
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''source_spm'''])
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''target_spm'''])
UpperCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return (
"This is a test",
"This is a test",
)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = '''</s>'''
UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_) , lowerCamelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_) , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<pad>''')
self.assertEqual(len(lowerCamelCase_) , 9)
def UpperCAmelCase__ ( self) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 9)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de')
UpperCamelCase = en_de_tokenizer(['''I am a small frog'''] , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(lowerCamelCase_ , batch.input_ids[0])
UpperCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCamelCase_)
UpperCamelCase = [x.name for x in Path(lowerCamelCase_).glob('''*''')]
self.assertIn('''source.spm''' , lowerCamelCase_)
MarianTokenizer.from_pretrained(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tok(
['''I am a small frog''' * 1_0_0_0, '''I am a small frog'''] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(batch.input_ids.shape , (2, 5_1_2))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0))
@slow
def UpperCAmelCase__ ( self) -> List[str]:
# fmt: off
UpperCamelCase = {'''input_ids''': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''')
UpperCamelCase = '''Tämä on testi'''
UpperCamelCase = '''This is a test'''
UpperCamelCase = [7_6, 7, 2_0_4_7, 2]
UpperCamelCase = [6_9, 1_2, 1_1, 9_4_0, 2]
UpperCamelCase = tokenizer(lowerCamelCase_).input_ids
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokenizer(text_target=lowerCamelCase_).input_ids
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
        self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
| 34 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" ,SCREAMING_SNAKE_CASE_ ,)
super().__init__(args=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 36 | 0 |
def a ( A__ = 1_0_0_0 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while a < n:
        # a multiple of 15 already satisfies a % 3 == 0, so no separate branch is needed
        if a % 3 == 0 or a % 5 == 0:
            result += a
a += 1
return result
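# Cross-check sketch (hedged): inclusion-exclusion gives the same total in O(1).
# tri(k) sums the multiples of k strictly below n; for n = 1000 the result is 233168.
def solution_closed_form(n: int = 1_000) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return tri(3) + tri(5) - tri(15)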
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowercase : List[str] = ['''text''', '''image''', '''audio''']
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__A , __A ):
inputs.append(create_inputs(__A ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def lowercase ( __A : List ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = []
for output in outputs:
if isinstance(__A , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__A , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__A , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _A :
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
snake_case : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input ,SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : str = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : Union[str, Any] = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) ,self.tool.outputs )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.outputs ):
snake_case : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
| 36 | 0 |
from collections import defaultdict
from math import gcd
def UpperCamelCase_ ( __a = 1_500_000 ) -> int:
a__ : defaultdict = defaultdict(__a )
a__ : Optional[int] = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __a , 2 ):
if gcd(__a , __a ) > 1:
continue
a__ : Any = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__a , limit + 1 , __a ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
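# Worked example (hedged sketch): Euclid's formula generates a primitive triple
# (m^2 - n^2, 2mn, m^2 + n^2) with perimeter 2m(m + n) for coprime m > n of opposite
# parity, which is exactly the quantity stepped over in the loop above. m = 2, n = 1
# gives (3, 4, 5) with perimeter 12.
def euclid_triple(m: int, n: int) -> tuple[int, int, int]:
    return (m * m - n * n, 2 * m * n, m * m + n * n)

assert euclid_triple(2, 1) == (3, 4, 5)
assert sum(euclid_triple(2, 1)) == 2 * 2 * (2 + 1)  # perimeter 2m(m + n)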
if __name__ == "__main__":
print(f"""{solution() = }""")
| 37 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowercase : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase ( __A : Optional[Any] , __A : Optional[Any] ) -> str:
'''simple docstring'''
inspect_dataset(__A , __A )
snake_case : List[str] = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase ( __A : Optional[int] , __A : Any ) -> Optional[Any]:
'''simple docstring'''
inspect_metric(__A , __A )
snake_case : Any = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Tuple , __A : Dict , __A : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = get_dataset_config_info(__A , config_name=__A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Tuple , __A : Any , __A : List[str] ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_config_info(__A , config_name=__A )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase ( __A : Any , __A : Dict ) -> Dict:
'''simple docstring'''
snake_case : int = get_dataset_config_names(__A )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[Any] , __A : Dict , __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[Any] = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
snake_case : Any = expected_configs[0]
assert expected_config in infos
snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[int] , __A : Tuple , __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = get_dataset_infos(__A )
assert expected_config in infos
snake_case : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Optional[int] , __A : Any , __A : Dict ) -> int:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
| 36 | 0 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = CodeGenTokenizer
lowerCamelCase__ = CodeGenTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = {'''add_prefix_space''': True}
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
snake_case__ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level,
        # and to get both CodeGen and Roberta to work at the same time
        # (mostly an issue of adding a space before the string).
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Without a pad token set, padding to max_length must raise ValueError.
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncation=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncation=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
| 38 |
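As a quick illustration of the decode-time truncation exercised by the slow test above, here is a hedged usage sketch (it reuses the same "Salesforce/codegen-350M-mono" checkpoint, which must be downloaded; the completion string is made up for the example):

import re

from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
completion = "def add(a, b):\n    return a + b\n\n\n\n# some trailing comment"
input_ids = tokenizer.encode(completion)
# Cut the decoded text before the first lone "#", end-of-text marker, or triple newline.
patterns = ["^#", re.escape("<|endoftext|>"), "\n\n\n"]
print(tokenizer.decode(input_ids, truncate_before_pattern=patterns))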
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    """Configuration class storing the parameters of an ALBERT model."""

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 36 | 0 |
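A minimal sketch of how this configuration class is consumed in practice (the override values below are illustrative, not the file's defaults; AlbertConfig and AlbertModel are standard transformers classes):

from transformers import AlbertConfig, AlbertModel

config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
model = AlbertModel(config)  # randomly initialised weights with the chosen sizes
print(config.num_hidden_groups)  # ALBERT-specific: how many layer groups share parameters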
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 39 |
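For reference, a small standalone sketch of the conversion this test asserts (running it downloads the same "naver-clova-ix/donut-base" processor; the shorter sequence here is just for brevity):

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
sequence = "<s_name>John Doe</s_name><s_age>99</s_age>"
print(processor.token2json(sequence))  # {'name': 'John Doe', 'age': '99'}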
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of the numbers in ``nums``.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 36 | 0 |
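For what it's worth, the standard library ships an equivalent helper (Python 3.8+); a quick check that it agrees with the mean() above:

from statistics import fmean

print(fmean([3, 6, 9, 12, 15, 18, 21]))  # 12.0, same result as mean()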
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the first-order entropy, second-order entropy, and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text input into two dicts of counts.
    The first dictionary stores the frequency of single character strings.
    The second dictionary stores the frequency of two character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
| 40 |
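A quick way to exercise the functions above once they are importable (the module filename is an assumption for the example; the input string is arbitrary, and the three printed values are the single-character entropy, the two-character entropy, and their difference, each rounded):

from text_entropy import calculate_prob  # assuming the file above is saved as text_entropy.py

calculate_prob("the quick brown fox jumps over the lazy dog")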
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 36 | 0 |
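A hedged sketch of building this composite configuration from two standard sub-configurations (a ViT encoder and a BERT decoder, both regular transformers config classes; default sizes are used):

from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()
decoder_config = BertConfig()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# from_encoder_decoder_configs flips these two flags on the decoder:
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True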