from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer-specific: ProbSparse attention and optional distillation
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
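
# A minimal usage sketch (values illustrative, not canonical defaults): thanks
# to `attribute_map`, generic code can read `hidden_size` while the config
# actually stores `d_model`.
#
#   config = InformerConfig(prediction_length=24, context_length=48)
#   assert config.hidden_size == config.d_model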
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    # the obfuscated dump lost this attribute's name; `test_cpu_offload` is a reconstruction from context
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # mps does not support device-local generators; fall back to a CPU-seeded one
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no-sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
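
# The integration tests above chain two pipelines: the base Stable Diffusion
# pipeline emits latents directly (`output_type="latent"`), and the x2 latent
# upscaler consumes them without an intermediate decode/re-encode round trip:
#
#   low_res_latents = pipe(prompt, output_type="latent").images
#   upscaled = upscaler(prompt=prompt, image=low_res_latents, guidance_scale=0).images[0]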
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( __UpperCAmelCase , unittest.TestCase ):
lowercase_ = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _UpperCamelCase ( self , UpperCAmelCase_=0 ) -> int:
lowerCamelCase : Tuple = floats_tensor((1, 3, 128, 128) , rng=random.Random(UpperCAmelCase_ ) )
lowerCamelCase : Any = np.random.RandomState(UpperCAmelCase_ )
lowerCamelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
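
# Note on the provider plumbing above: onnxruntime accepts either a bare
# provider name ("CPUExecutionProvider") or a (name, options_dict) tuple, which
# is why `gpu_provider` returns a tuple carrying the CUDA memory-arena options.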
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
def _UpperCamelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
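
# The replicate/shard calls above implement the standard jax.pmap data layout:
# `replicate(params)` copies the weights to every local device, while `shard(x)`
# reshapes a batch of size N into (num_devices, N // num_devices, ...) so that
# each device processes its own slice when the pipeline runs with `jit=True`.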
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
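
# 250004 and 250020 are the ids of the "en_XX" and "ro_RO" language-code tokens,
# which sit after the subword vocabulary in mBART-50; the integration tests
# below assert these ids via `fairseq_tokens_to_ids`.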
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is"
        ' that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen'
        " the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
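
# A minimal usage sketch (hypothetical feature set, for illustration only):
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)  # copies the concrete ClassLabel into label_schema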
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: "Blip2VisionConfig",
        qformer_config: "Blip2QFormerConfig",
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
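
# A minimal composition sketch (all values illustrative; `OPTConfig` would need
# to be imported wherever this is actually run). `Blip2Config.__init__`
# force-syncs the Q-Former's `encoder_hidden_size` to the vision tower's
# `hidden_size`, so the three sub-configs stay consistent:
#
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )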
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
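
# This formatter backs `Dataset.with_format("torch")` / `set_format("torch")` in
# the `datasets` library: rows, columns, and batches are extracted from Arrow as
# numpy, decoded, recursively converted to torch tensors, and equal-shaped lists
# are stacked into a single tensor by `_consolidate`.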
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase_ = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __lowerCamelCase ( a_ : Any , a_ : Union[str, Any]=False ) -> int:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = create_model(
'''HTSAT-tiny''' , '''roberta''' , a_ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=a_ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
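
# Note: `create_model` comes from the external LAION CLAP package (imported as
# `CLAP` above), not from `transformers`; "HTSAT-tiny" selects the HTS-AT audio
# tower and "roberta" the text tower of the original checkpoint.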
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
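
# Example invocation (script filename and paths are placeholders):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path /path/to/clap_checkpoint.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion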
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _SCREAMING_SNAKE_CASE:
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=13 ,SCREAMING_SNAKE_CASE__=30 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=37 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=10 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=2 ,) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = parent
__SCREAMING_SNAKE_CASE :str = batch_size
__SCREAMING_SNAKE_CASE :Optional[Any] = image_size
__SCREAMING_SNAKE_CASE :List[Any] = patch_size
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_channels
__SCREAMING_SNAKE_CASE :Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE :Any = use_labels
__SCREAMING_SNAKE_CASE :List[Any] = hidden_size
__SCREAMING_SNAKE_CASE :str = num_hidden_layers
__SCREAMING_SNAKE_CASE :Tuple = num_attention_heads
__SCREAMING_SNAKE_CASE :List[str] = intermediate_size
__SCREAMING_SNAKE_CASE :Tuple = hidden_act
__SCREAMING_SNAKE_CASE :Any = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :List[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :Optional[Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE :Any = initializer_range
__SCREAMING_SNAKE_CASE :int = scope
__SCREAMING_SNAKE_CASE :List[Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__SCREAMING_SNAKE_CASE :Optional[Any] = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_patches + 2
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE :Tuple = None
if self.use_labels:
__SCREAMING_SNAKE_CASE :int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = TFDeiTModel(config=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE :Any = 1
__SCREAMING_SNAKE_CASE :List[Any] = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE :int = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE :Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE :List[Any] = 1
__SCREAMING_SNAKE_CASE :Union[str, Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE :Any = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Any = config_and_inputs
__SCREAMING_SNAKE_CASE :Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE( A , A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Tuple = False
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = TFDeiTModelTester(self )
__SCREAMING_SNAKE_CASE :str = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ,hidden_size=37 )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :str = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
__SCREAMING_SNAKE_CASE :List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,tf.keras.layers.Dense ) )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :str = model_class(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE :List[Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE :Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=False ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :str = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( ) -> Tuple:
__SCREAMING_SNAKE_CASE :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
__SCREAMING_SNAKE_CASE :int = self.default_image_processor
__SCREAMING_SNAKE_CASE :str = prepare_img()
__SCREAMING_SNAKE_CASE :int = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='''tf''' )
# forward pass
__SCREAMING_SNAKE_CASE :int = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
__SCREAMING_SNAKE_CASE :Any = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4 ) ) | 498 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Dict =logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    '''Build a DPTConfig and the expected output shape for a given checkpoint URL.'''
    config = DPTConfig()
    # default to the depth-estimation output shape so `expected_shape` is defined for every checkpoint
    expected_shape = (1, 384, 384)
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    '''Drop keys that have no counterpart in the Hugging Face implementation.'''
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
    '''Map an original DPT parameter name to its Hugging Face counterpart.'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model" , "dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model" , "dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed" , "patch_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj" , "projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv" , "head" )
    if "scratch" in name:
        name = name.replace("scratch" , "neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn" , "convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn" , "convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn" , "convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn" , "convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace("out_conv" , "projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1" , "residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2" , "residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1" , "convolution1" )
    if "conv2" in name:
        name = name.replace("conv2" , "convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained" , "dpt" )
    if "bn" in name:
        name = name.replace("bn" , "batch_norm" )
    if "head" in name:
        name = name.replace("head" , "head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm" , "layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer" , "auxiliary_head.head" )
    return name
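# Quick standalone check (illustrative only) of the "tricky" refinenet renumbering above:
# abs(layer_idx - 4) maps 4 -> 0, 3 -> 1, 2 -> 2 and 1 -> 3.
def _demo_refinenet_mapping():
    for layer_idx in (1, 2, 3, 4):
        print(F"""refinenet{layer_idx} -> fusion_stage.layers.{abs(layer_idx - 4 )}""" )
    # refinenet1 -> fusion_stage.layers.3
    # refinenet2 -> fusion_stage.layers.2
    # refinenet3 -> fusion_stage.layers.1
    # refinenet4 -> fusion_stage.layers.0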
def read_in_q_k_v( state_dict , config ):
    '''Split each fused qkv projection into separate query, key and value entries.'''
    # target names follow the ViT-style attention layout used by the HF DPT implementation
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    '''Download the standard COCO test image used for output verification.'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    '''Copy/paste/tweak the original DPT weights into the Hugging Face format.'''
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1E-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model to hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
snake_case_ : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
snake_case_ : List[str] =parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 714 |
from __future__ import annotations
def UpperCAmelCase ( nums: list[int] ) -> int:
    '''
    Return the maximum sum of non-adjacent elements of ``nums``.

    >>> UpperCAmelCase([3, 7, 4, 6, 5])
    13
    >>> UpperCAmelCase([])
    0
    '''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
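    # Worked example (illustrative): for [1, 2, 3, 4, 5] the best non-adjacent
    # picks are 1 + 3 + 5 = 9.
    print(UpperCAmelCase([1, 2, 3, 4, 5] ) )  # 9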
| 205 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
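# Illustrative sketch of what `_LazyModule` does (simplified, NOT the real transformers
# implementation): submodules are imported only when one of their attributes is first
# accessed, so importing the package stays cheap even when torch is installed.
class _DemoLazyModule:
    def __init__(self , name , import_structure ):
        self._name = name
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self , attr ):
        import importlib

        module = importlib.import_module("." + self._class_to_module[attr] , self._name )
        return getattr(module , attr )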
| 41 |
import torch
from diffusers import DiffusionPipeline
class lowercase ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self ) -> torch.Tensor:
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # trivially a tensor of ones, but this exercises both registered modules
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
| 307 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase =logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =["""input_features""", """is_longer"""]
def __init__( self , snake_case=6_4 , snake_case=4_8_0_0_0 , snake_case=4_8_0 , snake_case=1_0 , snake_case=1_0_2_4 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4_0_0_0 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ) -> List[Any]:
'''simple docstring'''
super().__init__(
feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , )
_UpperCAmelCase : Any =top_db
_UpperCAmelCase : str =truncation
_UpperCAmelCase : List[Any] =padding
_UpperCAmelCase : List[Any] =fft_window_size
_UpperCAmelCase : int =(fft_window_size >> 1) + 1
_UpperCAmelCase : Any =hop_length
_UpperCAmelCase : Any =max_length_s
_UpperCAmelCase : Union[str, Any] =max_length_s * sampling_rate
_UpperCAmelCase : Optional[int] =sampling_rate
_UpperCAmelCase : Dict =frequency_min
_UpperCAmelCase : str =frequency_max
_UpperCAmelCase : Optional[Any] =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , )
_UpperCAmelCase : List[Any] =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , )
def lowerCAmelCase ( self) -> Dict[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =copy.deepcopy(self.__dict__)
_UpperCAmelCase : Optional[Any] =self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowerCAmelCase ( self , snake_case , snake_case = None) -> np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Dict =spectrogram(
snake_case , window_function(self.fft_window_size , 'hann') , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , )
return log_mel_spectrogram.T
def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : int =[0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : List[str] =[0]
# randomly choose index for each part
_UpperCAmelCase : Optional[int] =np.random.choice(ranges[0])
_UpperCAmelCase : Any =np.random.choice(ranges[1])
_UpperCAmelCase : Tuple =np.random.choice(ranges[2])
_UpperCAmelCase : List[str] =mel[idx_front : idx_front + chunk_frames, :]
_UpperCAmelCase : List[Any] =mel[idx_middle : idx_middle + chunk_frames, :]
_UpperCAmelCase : Optional[int] =mel[idx_back : idx_back + chunk_frames, :]
_UpperCAmelCase : str =torch.tensor(mel[None, None, :])
_UpperCAmelCase : int =torch.nn.functional.interpolate(
snake_case , size=[chunk_frames, 6_4] , mode='bilinear' , align_corners=snake_case)
_UpperCAmelCase : Optional[Any] =mel_shrink[0][0].numpy()
_UpperCAmelCase : Tuple =np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_UpperCAmelCase : int =True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_UpperCAmelCase : Dict =len(snake_case) - max_length
_UpperCAmelCase : Union[str, Any] =np.random.randint(0 , overflow + 1)
_UpperCAmelCase : List[str] =waveform[idx : idx + max_length]
_UpperCAmelCase : int =self._np_extract_fbank_features(snake_case , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
_UpperCAmelCase : Union[str, Any] =self._np_extract_fbank_features(snake_case , self.mel_filters)
_UpperCAmelCase : int =max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_UpperCAmelCase : List[Any] =mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_UpperCAmelCase : Optional[int] =np.stack([mel, mel, mel, mel] , axis=0)
_UpperCAmelCase : Optional[int] =False
else:
_UpperCAmelCase : Union[str, Any] =self._random_mel_fusion(snake_case , snake_case , snake_case)
_UpperCAmelCase : Union[str, Any] =True
else:
raise NotImplementedError(f"data_truncating {truncation} not implemented")
else:
_UpperCAmelCase : Tuple =False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_UpperCAmelCase : Dict =int(max_length / len(snake_case))
_UpperCAmelCase : Optional[Any] =np.stack(np.tile(snake_case , n_repeat + 1))[:max_length]
if padding == "repeatpad":
_UpperCAmelCase : Any =int(max_length / len(snake_case))
_UpperCAmelCase : Dict =np.stack(np.tile(snake_case , snake_case))
_UpperCAmelCase : Union[str, Any] =np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0)
if truncation == "fusion":
_UpperCAmelCase : Any =self._np_extract_fbank_features(snake_case , self.mel_filters)
_UpperCAmelCase : Dict =np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
_UpperCAmelCase : int =self._np_extract_fbank_features(snake_case , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =truncation if truncation is not None else self.truncation
_UpperCAmelCase : Any =padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
_UpperCAmelCase : Any =isinstance(snake_case , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
_UpperCAmelCase : Union[str, Any] =is_batched_numpy or (
isinstance(snake_case , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
_UpperCAmelCase : Tuple =[np.asarray(snake_case , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray):
_UpperCAmelCase : Any =np.asarray(snake_case , dtype=np.floataa)
elif isinstance(snake_case , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
_UpperCAmelCase : Optional[int] =raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
_UpperCAmelCase : Any =[np.asarray(snake_case)]
# convert to mel spectrogram, truncate and pad if needed.
_UpperCAmelCase : Optional[Any] =[
self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case)
for waveform in raw_speech
]
_UpperCAmelCase : Optional[int] =[]
_UpperCAmelCase : List[Any] =[]
for mel, longer in padded_inputs:
input_mel.append(snake_case)
is_longer.append(snake_case)
if truncation == "fusion" and sum(snake_case) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_UpperCAmelCase : List[str] =np.random.randint(0 , len(snake_case))
_UpperCAmelCase : List[Any] =True
if isinstance(input_mel[0] , snake_case):
_UpperCAmelCase : int =[np.asarray(snake_case , dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
_UpperCAmelCase : str =[[longer] for longer in is_longer]
_UpperCAmelCase : int ={'input_features': input_mel, 'is_longer': is_longer}
_UpperCAmelCase : Optional[int] =BatchFeature(snake_case)
if return_tensors is not None:
_UpperCAmelCase : List[str] =input_features.convert_to_tensors(snake_case)
return input_features
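# Illustrative usage sketch. The class above mirrors the public `ClapFeatureExtractor`
# API in transformers (an assumption made here, since the obfuscated signature above is
# not directly callable with keyword arguments).
def _demo_clap_feature_extraction():
    from transformers import ClapFeatureExtractor

    feature_extractor = ClapFeatureExtractor()  # defaults: 48 kHz mono, 10 s max, truncation="fusion"
    audio = np.random.randn(3 * 48_000 ).astype(np.float32 )  # 3 seconds of mono audio
    features = feature_extractor(audio , sampling_rate=48_000 , return_tensors="np" )
    print(features["input_features"].shape )  # (1, 4, 1001, 64): four stacked mel views for fusion
    print(features["is_longer"] )  # [[False]] -- shorter than 10 s, so it was repeat-padded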
| 721 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase =[int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    '''
    Count the words in words.txt whose letter-value sum (A=1 ... Z=26) is a
    triangular number (Project Euler 42).
    '''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    wordlist_path = os.path.join(script_dir , 'words.txt' )
    with open(wordlist_path ) as f:
        words = f.readline()
    words = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    triangle_words = [
        word_value
        for word_value in [sum(ord(x ) - 6_4 for x in word ) for word in words]
        if word_value in TRIANGULAR_NUMBERS
    ]
    return len(triangle_words )
if __name__ == "__main__":
print(solution())
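    # Worked example (illustrative): "SKY" -> 19 + 11 + 25 = 55, the 10th triangular
    # number, so it counts as a triangle word.
    sky_value = sum(ord(x ) - 6_4 for x in "SKY" )
    print(sky_value , sky_value in TRIANGULAR_NUMBERS )  # 55 True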
| 331 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class lowerCAmelCase ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self ):
        """simple docstring"""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # trivially a tensor of ones, but this exercises both registered modules
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
| 119 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
@add_end_docstrings(A )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "generated"
def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ):
"""simple docstring"""
super().__init__(*__lowercase , **__lowercase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def snake_case ( self : Dict , __lowercase : Any=None , __lowercase : int=None , __lowercase : List[str]=None , __lowercase : int=None , __lowercase : str=None , __lowercase : List[Any]=None , **__lowercase : Union[str, Any] , ):
"""simple docstring"""
__lowercase ={}
if truncation is not None:
__lowercase =truncation
__lowercase =generate_kwargs
__lowercase ={}
if return_tensors is not None and return_type is None:
__lowercase =ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__lowercase =return_type
if clean_up_tokenization_spaces is not None:
__lowercase =clean_up_tokenization_spaces
if stop_sequence is not None:
__lowercase =self.tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
if len(__lowercase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowercase =stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def snake_case ( self : List[Any] , __lowercase : int , __lowercase : int , __lowercase : int ):
"""simple docstring"""
return True
def snake_case ( self : Optional[Any] , *__lowercase : Optional[int] , __lowercase : Union[str, Any] ):
"""simple docstring"""
__lowercase =self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , __lowercase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
__lowercase =([prefix + arg for arg in args[0]],)
__lowercase =True
elif isinstance(args[0] , __lowercase ):
__lowercase =(prefix + args[0],)
__lowercase =False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
__lowercase =self.tokenizer(*__lowercase , padding=__lowercase , truncation=__lowercase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Any , *__lowercase : int , **__lowercase : Optional[Any] ):
"""simple docstring"""
__lowercase =super().__call__(*__lowercase , **__lowercase )
if (
isinstance(args[0] , __lowercase )
and all(isinstance(__lowercase , __lowercase ) for el in args[0] )
and all(len(__lowercase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def snake_case ( self : List[str] , __lowercase : Tuple , __lowercase : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **__lowercase : Dict ):
"""simple docstring"""
__lowercase =self._parse_and_tokenize(__lowercase , truncation=__lowercase , **__lowercase )
return inputs
def snake_case ( self : Optional[int] , __lowercase : Tuple , **__lowercase : Tuple ):
"""simple docstring"""
if self.framework == "pt":
__lowercase , __lowercase =model_inputs['input_ids'].shape
elif self.framework == "tf":
__lowercase , __lowercase =tf.shape(model_inputs['input_ids'] ).numpy()
__lowercase =generate_kwargs.get('min_length' , self.model.config.min_length )
__lowercase =generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(__lowercase , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
__lowercase =self.model.generate(**__lowercase , **__lowercase )
__lowercase =output_ids.shape[0]
if self.framework == "pt":
__lowercase =output_ids.reshape(__lowercase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
__lowercase =tf.reshape(__lowercase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def snake_case ( self : str , __lowercase : Optional[Any] , __lowercase : Optional[int]=ReturnType.TEXT , __lowercase : Union[str, Any]=False ):
"""simple docstring"""
__lowercase =[]
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__lowercase ={f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
__lowercase ={
f'''{self.return_name}_text''': self.tokenizer.decode(
__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase , )
}
records.append(__lowercase )
return records
@add_end_docstrings(A )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "summary"
def __call__( self : Dict , *__lowercase : Dict , **__lowercase : Dict ):
"""simple docstring"""
return super().__call__(*__lowercase , **__lowercase )
def snake_case ( self : str , __lowercase : int , __lowercase : int , __lowercase : int ):
"""simple docstring"""
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(A )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "translation"
def snake_case ( self : int , __lowercase : int , __lowercase : int , __lowercase : int ):
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def snake_case ( self : int , *__lowercase : str , __lowercase : List[str]=TruncationStrategy.DO_NOT_TRUNCATE , __lowercase : List[Any]=None , __lowercase : Optional[int]=None ):
"""simple docstring"""
if getattr(self.tokenizer , '_build_translation_inputs' , __lowercase ):
return self.tokenizer._build_translation_inputs(
*__lowercase , return_tensors=self.framework , truncation=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase )
else:
return super()._parse_and_tokenize(*__lowercase , truncation=__lowercase )
def snake_case ( self : Optional[int] , __lowercase : List[str]=None , __lowercase : Optional[Any]=None , **__lowercase : int ):
"""simple docstring"""
__lowercase , __lowercase , __lowercase =super()._sanitize_parameters(**__lowercase )
if src_lang is not None:
__lowercase =src_lang
if tgt_lang is not None:
__lowercase =tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__lowercase =kwargs.get('task' , self.task )
__lowercase =task.split('_' )
if task and len(__lowercase ) == 4:
# translation, XX, to YY
__lowercase =items[1]
__lowercase =items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[Any] , *__lowercase : Any , **__lowercase : List[Any] ):
"""simple docstring"""
return super().__call__(*__lowercase , **__lowercase )
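# Usage sketch (illustrative): these classes back the "text2text-generation",
# "summarization" and "translation_xx_to_yy" tasks of `transformers.pipeline`.
def _demo_translation_pipeline():
    from transformers import pipeline

    translator = pipeline('translation_en_to_fr' , model='t5-small' )
    print(translator('How old are you?' , max_length=40 ) )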
| 119 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase = DDIMPipeline
__UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
__UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
__UpperCAmelCase = False
def lowercase_ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : Union[str, Any] = {"unet": unet, "scheduler": scheduler}
return components
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__=0 ):
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
_UpperCamelCase : List[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCamelCase : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : int = "cpu"
_UpperCamelCase : Optional[int] = self.get_dummy_components()
_UpperCamelCase : Union[str, Any] = self.pipeline_class(**lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = pipe(**lowerCAmelCase__ ).images
_UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_UpperCamelCase : str = np.array(
[1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04] )
_UpperCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase__ , 1E-3 )
def lowercase_ (self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowercase_ (self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def lowercase_ (self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def lowercase_ (self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = "google/ddpm-cifar10-32"
_UpperCamelCase : List[Any] = UNetaDModel.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase : Tuple = DDIMScheduler()
_UpperCamelCase : int = DDIMPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
ddim.to(lowerCAmelCase__ )
ddim.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = ddim(generator=lowerCAmelCase__ , eta=0.0 , output_type="numpy" ).images
_UpperCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : int = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = "google/ddpm-ema-bedroom-256"
_UpperCamelCase : Tuple = UNetaDModel.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = DDIMScheduler.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase : Dict = DDIMPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
ddpm.to(lowerCAmelCase__ )
ddpm.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = torch.manual_seed(0 )
_UpperCamelCase : Tuple = ddpm(generator=lowerCAmelCase__ , output_type="numpy" ).images
_UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCamelCase : Dict = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 716 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=[10, 20, 30, 40] , lowerCAmelCase__=[2, 2, 3, 2] , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=10 , lowerCAmelCase__=0.02 , lowerCAmelCase__=["stage2", "stage3", "stage4"] , lowerCAmelCase__=[2, 3, 4] , lowerCAmelCase__=None , ):
'''simple docstring'''
_UpperCamelCase : str = parent
_UpperCamelCase : Any = batch_size
_UpperCamelCase : str = image_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : Union[str, Any] = num_stages
_UpperCamelCase : Any = hidden_sizes
_UpperCamelCase : Optional[Any] = depths
_UpperCamelCase : Union[str, Any] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : List[Any] = intermediate_size
_UpperCamelCase : Optional[int] = hidden_act
_UpperCamelCase : int = num_labels
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Any = out_indices
_UpperCamelCase : Any = scope
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Optional[int] = None
if self.use_labels:
_UpperCamelCase : int = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def lowercase_ (self ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : str = ConvNextVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : Optional[Any] = model(lowerCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : int = ConvNextVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : Optional[int] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Tuple = ConvNextVaBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : str = model(lowerCAmelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = ConvNextVaBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : Any = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : int = ConvNextVaModelTester(self )
_UpperCamelCase : List[str] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def lowercase_ (self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ (self ):
'''simple docstring'''
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def lowercase_ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def lowercase_ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def lowercase_ (self ):
'''simple docstring'''
pass
def lowercase_ (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCamelCase : Dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
continue
_UpperCamelCase : Dict = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
_UpperCamelCase : List[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
_UpperCamelCase : str = model(**lowerCAmelCase__ ).loss
loss.backward()
def lowercase_ (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCamelCase , _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCamelCase : Tuple = False
_UpperCamelCase : Union[str, Any] = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
continue
_UpperCamelCase : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.gradient_checkpointing_enable()
model.train()
_UpperCamelCase : List[str] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
_UpperCamelCase : Dict = model(**lowerCAmelCase__ ).loss
loss.backward()
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[int] = model_class(lowerCAmelCase__ )
_UpperCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[Any] = [*signature.parameters.keys()]
_UpperCamelCase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : Any = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def lowercase_ (self ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Union[str, Any] = ConvNextVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __lowerCAmelCase ( ) -> List[str]:
_UpperCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase_ (self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[str] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : str = preprocessor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(**lowerCAmelCase__ )
# verify the logits
_UpperCamelCase : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCamelCase : Tuple = torch.tensor([0.9996, 0.1966, -0.4386] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
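# For orientation: the integration test above mirrors plain inference. A minimal
# standalone sketch of that flow, assuming the canonical transformers class names
# (this dump mangles "V2" into "Va") and network access to the checkpoint:
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test above
print(model.config.id2label[logits.argmax(-1).item()])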
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
A_ : int = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
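# The `_LazyModule` above defers importing the heavy `modeling_wavlm` submodule
# until one of its attributes is first touched. A simplified stand-in for the
# idea (not the actual transformers implementation): attribute access triggers
# the real import, and the result is cached on the module object.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Defer submodule imports until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value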
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    '''simple docstring'''
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
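# A few sanity checks of the grouping logic, cross-checked against Python's
# built-in base conversion (these assume the fixed function above):
assert bin_to_octal("1010") == "12"  # padded to "001010" -> "001", "010" -> 0o12
assert bin_to_octal("111") == "7"
assert bin_to_octal("11010011") == format(int("11010011", 2), "o")  # "323"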
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = 42
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 1
@register_to_config
def __init__( self , lowercase = 2_000 , lowercase = 0.15 , lowercase = 0.01 , lowercase = 1_348.0 , lowercase = 1e-5 , lowercase = 1 , ) -> List[Any]:
# standard deviation of the initial noise distribution
lowerCAmelCase = sigma_max
# setable values
lowerCAmelCase = None
self.set_sigmas(lowercase , lowercase , lowercase , lowercase )
def _snake_case ( self , lowercase , lowercase = None ) -> torch.FloatTensor:
return sample
def _snake_case ( self , lowercase , lowercase = None , lowercase = None ) -> Union[str, Any]:
lowerCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCAmelCase = torch.linspace(1 , lowercase , lowercase , device=lowercase )
def _snake_case ( self , lowercase , lowercase = None , lowercase = None , lowercase = None ) -> Dict:
lowerCAmelCase = sigma_min if sigma_min is not None else self.config.sigma_min
lowerCAmelCase = sigma_max if sigma_max is not None else self.config.sigma_max
lowerCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowercase , lowercase )
lowerCAmelCase = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCAmelCase = torch.exp(torch.linspace(math.log(lowercase ) , math.log(lowercase ) , lowercase ) )
lowerCAmelCase = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase = None , lowercase = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
lowerCAmelCase = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCAmelCase = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowerCAmelCase = timesteps.to(self.discrete_sigmas.device )
lowerCAmelCase = self.discrete_sigmas[timesteps].to(sample.device )
lowerCAmelCase = self.get_adjacent_sigma(lowercase , lowercase ).to(sample.device )
lowerCAmelCase = torch.zeros_like(lowercase )
lowerCAmelCase = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCAmelCase = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCAmelCase = diffusion.unsqueeze(-1 )
lowerCAmelCase = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowerCAmelCase = randn_tensor(
sample.shape , layout=sample.layout , generator=lowercase , device=sample.device , dtype=sample.dtype )
lowerCAmelCase = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCAmelCase = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowercase , prev_sample_mean=lowercase )
def _snake_case ( self , lowercase , lowercase , lowercase = None , lowercase = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowerCAmelCase = randn_tensor(sample.shape , layout=sample.layout , generator=lowercase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCAmelCase = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCAmelCase = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCAmelCase = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCAmelCase = step_size.unsqueeze(-1 )
lowerCAmelCase = sample + step_size * model_output
lowerCAmelCase = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase )
def _snake_case ( self , lowercase , lowercase , lowercase , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCAmelCase = timesteps.to(original_samples.device )
lowerCAmelCase = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowerCAmelCase = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowercase ) * sigmas[:, None, None, None]
)
lowerCAmelCase = noise + original_samples
return noisy_samples
def __len__( self ) -> int:
return self.config.num_train_timesteps
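# For orientation, the scheduler above is diffusers' ScoreSdeVeScheduler (method
# names are mangled in this dump; the two `step` variants correspond to
# `step_pred` and `step_correct`). A minimal predictor-corrector sampling loop,
# sketched under assumptions: `score_model` is a placeholder for a trained score
# network returning a tensor shaped like `sample`, and the shape and step counts
# are illustrative only.
import torch
from diffusers import ScoreSdeVeScheduler


def sde_ve_sample(score_model, shape=(1, 3, 64, 64), num_inference_steps=2000):
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)

    # Variance-exploding SDE: start from noise at the maximal sigma.
    sample = torch.randn(shape) * scheduler.init_noise_sigma

    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])

        # Corrector: Langevin MCMC steps at the current noise level.
        for _ in range(scheduler.config.correct_steps):
            sample = scheduler.step_correct(score_model(sample, sigma_t), sample).prev_sample

        # Predictor: one reverse-SDE (ancestral) step.
        output = scheduler.step_pred(score_model(sample, sigma_t), t, sample)
        sample, sample_mean = output.prev_sample, output.prev_sample_mean

    return sample_mean  # by convention the mean is returned as the final sample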
from math import sqrt
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'{solution() = }')
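# Quick sanity checks: the first prime, the worked example from the Project
# Euler #7 problem statement, and the accepted answer for the default n=10001.
assert solution(1) == 2
assert solution(6) == 13
assert solution() == 104743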
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    '''simple docstring'''
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
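# The register/run protocol above is plain argparse plumbing: each command class
# registers a subparser and stashes a factory under `func`; main() then calls
# `args.func(args)` and `.run()` on the result. A toy command illustrating the
# shape (hypothetical, not part of diffusers):
class HelloCommand:
    @staticmethod
    def register_subcommand(commands_parser):
        hello_parser = commands_parser.add_parser("hello", help="Print a greeting")
        hello_parser.add_argument("--name", default="world")
        # main() will call this factory with the parsed args, then .run() the result.
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")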
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
    """simple docstring"""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
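# The staleness thresholds above are plain datetime arithmetic; a synthetic
# check of the cutoff logic that needs no GitHub API access:
from datetime import datetime, timedelta

now = datetime(2023, 6, 1)
updated_at = now - timedelta(days=24)
created_at = now - timedelta(days=45)
assert (now - updated_at).days > 23 and (now - created_at).days >= 30  # stale-comment branch fires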
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    """
    Adjacency-list graph built on a plain Python dictionary.
    Directed by default; pass directed=False to mirror every edge.
    """

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as its first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
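# A short usage sketch: `add_edge` returns `self`, so calls chain; undirected
# graphs mirror each edge in both adjacency lists.
if __name__ == "__main__":
    graph = GraphAdjacencyList()
    graph.add_edge(0, 1).add_edge(0, 2).add_edge(1, 2)
    print(graph)  # {0: [1, 2], 1: [2], 2: []}

    undirected = GraphAdjacencyList(directed=False)
    undirected.add_edge("a", "b")
    print(undirected)  # {'a': ['b'], 'b': ['a']}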
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
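# A small usage sketch of the template above, assuming the surrounding
# `datasets` task-template machinery: `column_mapping` tells alignment code
# which dataset columns map onto the canonical task column names.
if __name__ == "__main__":
    template = QuestionAnsweringExtractive(question_column="query", context_column="passage")
    assert template.column_mapping == {"query": "question", "passage": "context", "answers": "answers"}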
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        raise ValueError(f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}")
    if cols2 != 1:
        raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}")
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0.0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(rows):
        total = 0.0
        for j in range(cols - 1):
            if i == j:
                continue
            # strict diagonal dominance compares magnitudes, so use abs()
            total += abs(table[i][j])
        if abs(table[i][i]) <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
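# A worked example: a strictly diagonally dominant system whose exact solution
# is (1, -2, 1); Jacobi iteration converges to it from a zero initial guess.
#     5x +  y +  z =  4
#      x + 6y + 2z = -9
#     2x +  y + 7z =  7
if __name__ == "__main__":
    import numpy as np

    coefficient = np.array([[5.0, 1.0, 1.0], [1.0, 6.0, 2.0], [2.0, 1.0, 7.0]])
    constant = np.array([[4.0], [-9.0], [7.0]])
    approx = jacobi_iteration_method(coefficient, constant, init_val=[0.0, 0.0, 0.0], iterations=50)
    print([round(v, 6) for v in approx])  # ~[1.0, -2.0, 1.0]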
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
class lowerCAmelCase ( snake_case ):
def __init__( self , a__=1 , a__=0 , a__=2 , a__=5_12 , a__="cls" , a__=False , a__=True , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
_UpperCAmelCase = project_dim
_UpperCAmelCase = pooler_fn
_UpperCAmelCase = learn_encoder
_UpperCAmelCase = use_attention_mask
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = [R"""pooler""", R"""logit_scale"""]
lowerCAmelCase__ = [R"""position_ids""", R"""predictions.decoder.bias"""]
lowerCAmelCase__ = """roberta"""
lowerCAmelCase__ = RobertaSeriesConfig
def __init__( self , a__ ):
super().__init__(a__ )
_UpperCAmelCase = XLMRobertaModel(a__ )
_UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim )
_UpperCAmelCase = getattr(a__ , 'has_pre_transformation' , a__ )
if self.has_pre_transformation:
_UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim )
_UpperCAmelCase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __A ( self , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , ):
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.base_model(
input_ids=a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_attentions=a__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=a__ , )
if self.has_pre_transformation:
_UpperCAmelCase = outputs['hidden_states'][-2]
_UpperCAmelCase = self.pre_LN(a__ )
_UpperCAmelCase = self.transformation_pre(a__ )
return TransformationModelOutput(
projection_state=a__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
_UpperCAmelCase = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=a__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
from ....configuration_utils import PretrainedConfig
from ....utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
snake_case__ : List[str] = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """retribert"""
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ) -> List[Any]:
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = share_encoders
UpperCamelCase_ = projection_dim
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Dict = "xlm-prophetnet"
lowerCAmelCase__ : str = ["past_key_values"]
lowerCAmelCase__ : Optional[int] = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self : int , snake_case : Optional[float] = 0.1 , snake_case : Optional[Union[str, Callable]] = "gelu" , snake_case : Optional[int] = 30522 , snake_case : Optional[int] = 1024 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[float] = 0.1 , snake_case : Optional[float] = 0.1 , snake_case : Optional[int] = 512 , snake_case : Optional[float] = 0.02 , snake_case : Optional[bool] = True , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 2 , snake_case : Optional[int] = 32 , snake_case : Optional[int] = 128 , snake_case : Optional[bool] = False , snake_case : Optional[float] = 0.0 , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 1 , snake_case : Optional[int] = 2 , **snake_case : List[str] , ):
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = num_encoder_layers
__UpperCamelCase = num_encoder_attention_heads
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = num_decoder_layers
__UpperCamelCase = num_decoder_attention_heads
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = init_std # Normal(0, this parameter)
__UpperCamelCase = activation_function
# parameters for xlmprophetnet
__UpperCamelCase = ngram
__UpperCamelCase = num_buckets
__UpperCamelCase = relative_max_distance
__UpperCamelCase = disable_ngram_loss
__UpperCamelCase = eps
# 3 Types of Dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = dropout
__UpperCamelCase = use_cache
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , add_cross_attention=snake_case , decoder_start_token_id=snake_case , **snake_case , )
@property
def snake_case ( self : Any ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def snake_case ( self : Tuple , snake_case : Dict ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
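# A quick sketch of the layer-count plumbing above, assuming the canonical
# transformers name for the mangled class (XLMProphetNetConfig): the read-only
# property sums encoder and decoder depths, the attribute map aliases
# `num_attention_heads` to the encoder value, and the setter deliberately
# refuses direct assignment.
if __name__ == "__main__":
    from transformers import XLMProphetNetConfig

    config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
    assert config.num_hidden_layers == 12
    assert config.num_attention_heads == config.num_encoder_attention_heads
    try:
        config.num_hidden_layers = 12
    except NotImplementedError:
        pass  # expected: set num_encoder_layers / num_decoder_layers instead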
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase :
"""simple docstring"""
def __init__( self : Any , snake_case : Optional[Any] , snake_case : Tuple=13 , snake_case : List[Any]=32 , snake_case : Optional[Any]=2 , snake_case : Dict=3 , snake_case : Union[str, Any]=16 , snake_case : Optional[int]=[32, 64, 128] , snake_case : int=[1, 2, 1] , snake_case : Any=[2, 2, 4] , snake_case : Optional[int]=2 , snake_case : List[str]=2.0 , snake_case : Any=True , snake_case : Optional[Any]=0.0 , snake_case : str=0.0 , snake_case : List[Any]=0.1 , snake_case : List[str]="gelu" , snake_case : Any=False , snake_case : Optional[int]=True , snake_case : List[str]=0.02 , snake_case : List[Any]=1E-5 , snake_case : str=True , snake_case : Any=None , snake_case : int=True , snake_case : Optional[Any]=10 , snake_case : List[str]=8 , snake_case : List[str]=["stage1", "stage2"] , snake_case : Optional[Any]=[1, 2] , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = embed_dim
__UpperCamelCase = hidden_sizes
__UpperCamelCase = depths
__UpperCamelCase = num_heads
__UpperCamelCase = window_size
__UpperCamelCase = mlp_ratio
__UpperCamelCase = qkv_bias
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = drop_path_rate
__UpperCamelCase = hidden_act
__UpperCamelCase = use_absolute_embeddings
__UpperCamelCase = patch_norm
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = initializer_range
__UpperCamelCase = is_training
__UpperCamelCase = scope
__UpperCamelCase = use_labels
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = encoder_stride
__UpperCamelCase = out_features
__UpperCamelCase = out_indices
def snake_case ( self : Union[str, Any] ):
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self : List[str] ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case ( self : int , snake_case : Optional[int] , snake_case : List[str] , snake_case : Optional[int] ):
__UpperCamelCase = FocalNetModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case )
__UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case ( self : Optional[Any] , snake_case : Any , snake_case : Dict , snake_case : int ):
__UpperCamelCase = FocalNetBackbone(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__UpperCamelCase = None
__UpperCamelCase = FocalNetBackbone(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Optional[int] , snake_case : Union[str, Any] , snake_case : int , snake_case : Any ):
__UpperCamelCase = FocalNetForMaskedImageModeling(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = FocalNetForMaskedImageModeling(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(snake_case )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case ( self : str , snake_case : Tuple , snake_case : List[str] , snake_case : Union[str, Any] ):
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = FocalNetForImageClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = FocalNetForImageClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self : str ):
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : int = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Optional[Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : Optional[Any] = False
def snake_case ( self : Any ):
__UpperCamelCase = FocalNetModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=snake_case , embed_dim=37 , has_text_modality=snake_case )
def snake_case ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[Any] ):
return
def snake_case ( self : List[Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def snake_case ( self : List[Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case )
def snake_case ( self : Any ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case )
def snake_case ( self : Any ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def snake_case ( self : int ):
pass
def snake_case ( self : Tuple ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCamelCase = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def snake_case ( self : Dict ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCamelCase = model_class(snake_case )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case )
def snake_case ( self : Optional[int] , snake_case : Union[str, Any] , snake_case : int , snake_case : List[str] , snake_case : Optional[int] ):
__UpperCamelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCamelCase = outputs.hidden_states
__UpperCamelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case ) , snake_case )
# FocalNet has a different seq_length
__UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(snake_case ) , snake_case )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = reshaped_hidden_states[0].shape
__UpperCamelCase = (
reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case ( self : Union[str, Any] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__UpperCamelCase = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
def snake_case ( self : Optional[Any] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = 3
__UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__UpperCamelCase = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
@slow
def snake_case ( self : Union[str, Any] ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = FocalNetModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def snake_case ( self : Optional[int] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(snake_case )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : str ):
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def snake_case ( self : List[str] ):
__UpperCamelCase = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(snake_case )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__UpperCamelCase = image_processor(images=snake_case , return_tensors='''pt''' ).to(snake_case )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**snake_case )
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
__UpperCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : str = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ : Dict = FocalNetConfig
lowerCAmelCase__ : List[str] = False
def snake_case ( self : Dict ):
__UpperCamelCase = FocalNetModelTester(self )
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index i and rebuild the affected internal values."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Fold self.fn over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal over all nodes."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
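# Any associative merge over the stored values works, not just add/max/min.
# For example, gcd-range queries (assuming the fixed classes above):
if __name__ == "__main__":
    from math import gcd

    arr = SegmentTree([12, 18, 24, 8], gcd)
    print(arr.query_range(0, 3))  # 2
    arr.update(3, 30)
    print(arr.query_range(1, 3))  # 6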
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
__A : Optional[int] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__A : List[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = {}
with open(UpperCamelCase__ , '''r''' ) as file:
for line_number, line in enumerate(UpperCamelCase__ ):
UpperCAmelCase = line.strip()
if line:
UpperCAmelCase = line.split()
UpperCAmelCase = line_number
UpperCAmelCase = words[0]
UpperCAmelCase = value
return result
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
UpperCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = hf_pointer
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase = value[0]
else:
UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase = value
elif weight_type == "weight_g":
UpperCAmelCase = value
elif weight_type == "weight_v":
UpperCAmelCase = value
elif weight_type == "bias":
UpperCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = value
else:
UpperCAmelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
UpperCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = '''.'''.join([key, hf_param_name] )
else:
UpperCAmelCase = key
UpperCAmelCase = value if '''lm_head''' in full_key else value[0]
__A : str = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase = True
if "*" in mapped_key:
UpperCAmelCase = name.split(UpperCamelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase = mapped_key.replace('''*''' , UpperCamelCase__ )
if "weight_g" in name:
UpperCAmelCase = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase = '''weight_v'''
elif "bias" in name:
UpperCAmelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase = '''weight'''
else:
UpperCAmelCase = None
if hf_dict is not None:
rename_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return is_used
return is_used
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = fairseq_model.state_dict()
UpperCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase = True
else:
UpperCAmelCase = load_wavaveca_layer(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
UpperCAmelCase = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase = name.split('''.''' )
UpperCAmelCase = int(items[0] )
UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
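# Example invocation (paths are illustrative, not from the original script):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --dict_path /path/to/dict.ltr.txt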
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
args = parser.parse_args()
is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 130 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class __snake_case ( PretrainedConfig ):
    model_type = "audio-spectrogram-transformer"
def __init__( self : str , _lowercase : Dict=7_68 , _lowercase : List[str]=12 , _lowercase : List[str]=12 , _lowercase : Optional[int]=30_72 , _lowercase : Dict="gelu" , _lowercase : str=0.0 , _lowercase : int=0.0 , _lowercase : int=0.02 , _lowercase : Optional[int]=1E-12 , _lowercase : Optional[int]=16 , _lowercase : Tuple=True , _lowercase : List[str]=10 , _lowercase : int=10 , _lowercase : int=10_24 , _lowercase : Optional[Any]=1_28 , **_lowercase : List[str] , ):
"""simple docstring"""
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = qkv_bias
SCREAMING_SNAKE_CASE__ = frequency_stride
SCREAMING_SNAKE_CASE__ = time_stride
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = num_mel_bins
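# Usage sketch (illustrative): the defaults above mirror the
# "MIT/ast-finetuned-audioset-10-10-0.4593" checkpoint.
# config = __snake_case()
# assert config.num_mel_bins == 128 and config.max_length == 1024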
| 704 | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow's C++ logging before any TF import
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 379 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """Saves the best model by validation metric, embedding the metric value in the filename."""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=F"val_{metric}", mode='max', save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=F"val_{metric}", mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
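# Usage sketch (illustrative): wire the factories into a pl.Trainer.
# trainer = pl.Trainer(
#     callbacks=[
#         get_checkpoint_callback(output_dir="out", metric="bleu"),
#         get_early_stopping_callback(metric="bleu", patience=3),
#     ]
# )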
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = F"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')
@rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 27 |
def bfs(graph, source, sink, parent) -> bool:
    """Breadth-first search for an augmenting path; records the path in `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink) -> int:
    """Maximum flow from source to sink over the capacity matrix `graph` (Edmonds-Karp)."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
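# The classic CLRS example network above has a maximum flow of 23 from node 0 to node 5.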
| 598 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''')
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''')
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    # NOTE: the original (mangled) function name was unrecoverable; this name and
    # shape follow the equivalent helper in accelerate's example tests.
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
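# Minimal usage sketch (illustrative):
# dataset = RegressionDataset(length=128, seed=0)
# model = RegressionModel(a=1.0, b=0.5)
# batch = dataset[0]  # {"x": ..., "y": ...}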
| 714 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                " `placeholder_token` that is not already in the tokenizer." )
    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + F'''_{i}'''
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}keep placeholder tokens independent''' )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text
    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 539 | 0 |
"""simple docstring"""
import os
def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal path sum over the matrix in `filename`, moving right,
    up or down (Project Euler problem 82).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'''{solution() = }''')
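# With Project Euler problem 82's official 80x80 matrix saved as input.txt,
# this prints 260324 (assumption: this file implements that problem).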
| 426 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ , keep_in_memory=A__ ).read()
_check_json_dataset(A__ , A__ )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( A__ , A__ , A__ ) -> str:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ , split=A__ ).read()
_check_json_dataset(A__ , A__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A__ ( A__ , A__ , A__ ) -> Dict:
'''simple docstring'''
if issubclass(A__ , A__ ):
_UpperCAmelCase = jsonl_path
elif issubclass(A__ , A__ ):
_UpperCAmelCase = [jsonl_path]
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ ).read()
_check_json_dataset(A__ , A__ )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( A__ , A__ , A__ ) -> Any:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=A__ , keep_in_memory=A__ ).read()
_check_json_datasetdict(A__ , A__ )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( A__ , A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
if split:
_UpperCAmelCase = {split: jsonl_path}
else:
_UpperCAmelCase = "train"
_UpperCAmelCase = {"train": jsonl_path, "test": jsonl_path}
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ ).read()
_check_json_datasetdict(A__ , A__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
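# The `dataset` fixture used by the writer tests below (a 10-row Dataset with
# "tokens"/"labels"/"answers"/"id" columns) is assumed to come from the
# project's conftest; it is not defined in this file.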
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json_function(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
assert isinstance(exported_content[0] , snake_case_ )
assert len(snake_case_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , orient=snake_case_ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(snake_case_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(snake_case_ ) == 10
def __A ( self , snake_case_ ) -> Any:
with pytest.raises(snake_case_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
_UpperCAmelCase = tmp_path_factory.mktemp("data" ) / F"""test.json.{extension}"""
_UpperCAmelCase = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(snake_case_ , snake_case_ , compression=snake_case_ ).write()
with fsspec.open(snake_case_ , "rb" , compression="infer" ) as f:
_UpperCAmelCase = f.read()
with fsspec.open(snake_case_ , "rb" , compression="infer" ) as f:
_UpperCAmelCase = f.read()
assert exported_content == original_content
| 426 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Dict = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : int = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Tuple = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 458 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of 1 random PIL image."""
        image_inputs = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")
        input_feat_extract.pop("""original_sizes""")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1_7_6_4, 2_6_4_6]]
        reshaped_input_size = [[6_8_3, 1_0_2_4]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1_7_6_4, 2_6_4_6))
        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1_7_6_4, 2_6_4_6))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1_7_6_4, 2_6_4_6))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of 1 random PIL image."""
        image_inputs = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")
        input_feat_extract.pop("""original_sizes""")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1_7_6_4, 2_6_4_6]]
        reshaped_input_size = [[6_8_3, 1_0_2_4]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="""tf""")
        self.assertEqual(masks[0].shape, (1, 3, 1_7_6_4, 2_6_4_6))
        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="""tf""",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1_7_6_4, 2_6_4_6))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="""tf""")
        self.assertEqual(masks[0].shape, (1, 3, 1_7_6_4, 2_6_4_6))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="""tf""")
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of 1 random PIL image."""
        image_inputs = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1_7_6_4, 2_6_4_6]]
        reshaped_input_size = [[6_8_3, 1_0_2_4]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="""tf""")
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="""pt""")
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="""pt""")["""pixel_values"""].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="""pt""")["""pixel_values"""].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="""tf""")["""pixel_values"""].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="""tf""")["""pixel_values"""].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 458 | 1 |
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
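# For the default n=100 this returns 25164150 (Project Euler #6).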
if __name__ == "__main__":
print(F"{solution() = }") | 86 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<s>')
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip('Skip this test while all models are still to be uploaded.')
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6], )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, 'This is a test')
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase, model_name='facebook/m2m100_418M', revision='c168bae485c864188cf9aa0e4108b0b6934dc91e', )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
'''simple docstring'''
snake_case__ = """facebook/m2m100_418M"""
snake_case__ = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
snake_case__ = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
snake_case__ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ):
"""simple docstring"""
UpperCamelCase = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
UpperCamelCase = 1
return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id('ar'), 12_8006)
        self.assertEqual(self.tokenizer.get_lang_id('en'), 12_8022)
        self.assertEqual(self.tokenizer.get_lang_id('ro'), 12_8076)
        self.assertEqual(self.tokenizer.get_lang_id('mr'), 12_8063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab['<unk>'], 3)
        self.assertIn(self.tokenizer.get_lang_token('en'), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = 'en'
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = 'en'
        self.tokenizer.tgt_lang = 'fr'
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch["decoder_input_ids"] = shift_tokens_right(
            batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
        for k in batch:
            batch[k] = batch[k].tolist()
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = 'mr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = 'zh'
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
@require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = 'mr'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = 'zh'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en', tgt_lang='ar')
        self.assertEqual(
            nested_simplify(inputs), {
                # en_XX, A, test, EOS
                'input_ids': [[12_8022, 58, 4183, 2]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 12_8006,
            }, )
| 280 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ], )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ], )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=None, )

        self.run_entailment_id(classifier)
def A ( self : List[str] , __lowerCAmelCase : Pipeline ) -> Dict:
"""simple docstring"""
a = zero_shot_classifier.model.config
a = config.labelaid
a = zero_shot_classifier.entailment_id
a = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
a = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
a = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
a = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
a = original_labelaid
self.assertEqual(__lowerCAmelCase , zero_shot_classifier.entailment_id )
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=__lowerCAmelCase , )
self.assertEqual(
            nested_simplify(outputs), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=__lowerCAmelCase , )
self.assertEqual(
            nested_simplify(outputs), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
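
# A minimal standalone usage sketch of the pipeline exercised by the tests above
# (a sketch, not part of the test suite; assumes the checkpoint can be fetched):
#
#   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#   result = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "sports"])
#   print(result["labels"], result["scores"])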
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids must be a plain list of integer lists; tensors are rejected.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One nested constraint must not be a complete subset of another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
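
# A quick illustration of the API outside the test harness: step a constraint
# token by token and watch it complete (a sketch mirroring the cases above).
#
#   dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   for token in (1, 2, 4):
#       stepped, completed, reset = dc.update(token)
#   assert completed and dc.current_seq == [1, 2, 4]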
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # fit() accepts generators directly in TF 2.x (fit_generator was removed)
    classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)

    # The sigmoid output is a probability, so threshold it rather than
    # comparing for exact equality with 0 or 1.
    # training_set.class_indices
    if result[0][0] >= 0.5:
        prediction = "Abnormality detected"
    else:
        prediction = "Normal"
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Jaccard similarity |A ∩ B| / |A ∪ B| for two sets (or lists/tuples).
    With alternative_union=True the denominator is |A| + |B| instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
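    # For the sets above |A ∩ B| = 3 and |A ∪ B| = 8, so the line above prints 0.375.
    # With the alternative union the denominator is |A| + |B| = 11 instead:
    print(jaccard_similarity(set_a, set_b, alternative_union=True))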
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
def lowercase ( self : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
_snake_case = output.encoder_hidden_states
_snake_case = output.pixel_decoder_hidden_states
_snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , config.decoder_layers )
def lowercase ( self : Any , _lowerCamelCase : int , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Any=False ):
with torch.no_grad():
_snake_case = MaskaFormerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
_snake_case = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : List[Any] ):
_snake_case = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
def comm_check_on_output(_lowerCamelCase : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
_snake_case = model(
pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def lowercase ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase ( self : List[str] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def lowercase ( self : int ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowercase ( self : Union[str, Any] ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowercase ( self : Optional[Any] ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowercase ( self : Optional[Any] ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowercase ( self : Dict ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase ( self : Tuple ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase ( self : Union[str, Any] ):
pass
def lowercase ( self : str ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@slow
def lowercase ( self : Dict ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_snake_case = MaskaFormerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def lowercase ( self : Tuple ):
_snake_case = (self.model_tester.min_size,) * 2
_snake_case = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowerCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowerCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowerCamelCase ).long(),
}
_snake_case = self.model_tester.get_config()
_snake_case = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase )
_snake_case = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def lowercase ( self : List[str] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def lowercase ( self : Any ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
_snake_case = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self : int ):
if not self.model_tester.is_training:
return
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
loss.backward()
def lowercase ( self : Dict ):
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = True
_snake_case = True
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
_snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
def lowercase ( self : List[Any] ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
_snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_snake_case = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_snake_case = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def lowercase ( self : List[str] ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_snake_case = inputs['''pixel_values'''].to(_lowerCamelCase )
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''mask_labels''']]
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
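
# A standalone inference sketch assembled from the pieces exercised above
# (a sketch; assumes the checkpoint can be downloaded from the hub):
#
#   processor = MaskaFormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   model = MaskaFormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   inputs = processor(prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)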
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : int ) -> bool:
if num < 0:
return False
_snake_case = num
_snake_case = 0
while num > 0:
_snake_case = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
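
    # e.g. 121 reverses to 121 (a palindrome) while 123 reverses to 321 (not one).
    print(is_palindrome(121), is_palindrome(123))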
import math
def is_prime(number: int) -> bool:
    """Trial division using the fact that every prime > 3 has the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Returns the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
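    # Sanity check on a tiny input: the 6th prime is 13 (2, 3, 5, 7, 11, 13).
    assert solution(6) == 13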
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = OpenLlamaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )
lowerCAmelCase_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = True
lowerCAmelCase_ = OpenLlamaModel(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
'''simple docstring'''
lowerCAmelCase_ = OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
lowerCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
lowerCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ = type
self.model_tester.create_and_check_model(*lowercase_ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = input_dict['input_ids']
lowerCAmelCase_ = input_ids.ne(1 ).to(lowercase_ )
lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ = OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = 'single_label_classification'
lowerCAmelCase_ = input_dict['input_ids']
lowerCAmelCase_ = input_ids.ne(1 ).to(lowercase_ )
lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ = OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = 'multi_label_classification'
lowerCAmelCase_ = input_dict['input_ids']
lowerCAmelCase_ = input_ids.ne(1 ).to(lowercase_ )
lowerCAmelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase_ = OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def _lowercase ( self ) -> int:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowercase ( self , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = ids_tensor([1, 1_0] , config.vocab_size )
lowerCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = OpenLlamaModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
lowerCAmelCase_ = original_model(lowercase_ ).last_hidden_state
lowerCAmelCase_ = original_model(lowercase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = {'type': scaling_type, 'factor': 10.0}
lowerCAmelCase_ = OpenLlamaModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
lowerCAmelCase_ = scaled_model(lowercase_ ).last_hidden_state
lowerCAmelCase_ = scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) )
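
# The parameterized scaling test above exercises RoPE scaling end to end; in
# user code the same knob is a plain config dict (a sketch, assuming the
# Llama-style rope_scaling field this test relies on):
#
#   config.rope_scaling = {"type": "linear", "factor": 10.0}
#   model = OpenLlamaModel(config)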
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS84 ellipsoid parameters (https://en.wikipedia.org/wiki/World_Geodetic_System)
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Distance in metres between two points on Earth, using the haversine
    formula on latitudes corrected for the WGS84 flattening.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
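
    # Illustrative call (coordinates are example values): San Francisco to
    # Yosemite Valley, result in metres.
    print(haversine_distance(37.774856, -122.424227, 37.864742, -119.537521))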
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one list per reference "column", so transpose the inputs
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
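
# A minimal usage sketch, mirroring the docstring examples above:
#
#   chrf = datasets.load_metric("chrf")
#   results = chrf.compute(predictions=["hello there"], references=[["hello there"]])
#   print(results["score"])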
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """
    >>> encode("myname")
    [13, 25, 14, 1, 13, 5]
    """
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """
    >>> decode([13, 25, 14, 1, 13, 5])
    'myname'
    """
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
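
    # Round-trip sanity check: "myname" -> [13, 25, 14, 1, 13, 5] -> "myname"
    assert decode(encode("myname")) == "myname"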
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a Pascal's-triangle row DP in O(r) extra space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
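
# Cross-check against the standard library's closed form (Python 3.8+):
# C(10, 5) = 252.
from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252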
def ugly_numbers(n: int) -> int:
    """
    Returns the nth "ugly number": a positive integer whose only prime
    factors are 2, 3 and 5 (1 counts as the first).
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)

        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")


def prefix_function(input_string: str) -> list:
    """
    Knuth-Morris-Pratt prefix function: prefix_result[i] is the length of the
    longest proper prefix of input_string that is also a suffix ending at i.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result
def longest_prefix(input_str: str) -> int:
    """Length of the longest border (prefix that is also a suffix) in the string."""
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
    doctest.testmod()
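
    # Worked example: for "aabcdaabc" the prefix function is
    # [0, 1, 0, 0, 0, 1, 2, 3, 4], so longest_prefix("aabcdaabc") == 4 ("aabc").
    print(prefix_function("aabcdaabc"))
    print(longest_prefix("aabcdaabc"))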
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
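# A minimal usage sketch of the auto classes defined above (illustrative; the
# checkpoint name "bert-base-cased" and the tokenizer call are assumptions,
# not part of this module):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")
#     outputs = model(**tokenizer("Hello world", return_tensors="np"))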
| 657 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Writes the README.md model card for one allenai wmt16 checkpoint."""
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
_UpperCamelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
print(f"Generating {path}" )
    with open(path, "w", encoding="utf-8") as f:
        f.write(_UpperCamelCase)  # the model card text assembled above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 138 | 0 |
import os
def solution(filename: str = "matrix.txt") -> int:
    """Project Euler 81: minimal path sum through the grid moving only right and down."""
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
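# Hand-checked miniature of the recurrence above on the 2x2 grid [[1, 2], [3, 4]]:
# dp[0][0] = 1, dp[0][1] = 3, dp[1][0] = 4, dp[1][1] = 4 + min(3, 4) = 7,
# i.e. the cheapest right/down path is 1 -> 2 -> 4 with total cost 7.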
if __name__ == "__main__":
print(f"{solution() = }")
| 561 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
UpperCamelCase_ = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """Writes a debugging copy of the first batch (decoded text plus raw token ids) to disk."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length", default=56, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False, help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
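# Illustrative invocation of the script above (paths and hyper-parameter
# values are placeholders, not values taken from this file):
#
#     python finetune.py \
#         --data_dir ./cnn_dm --output_dir ./runs/summ \
#         --do_predict --n_val 500 --val_metric rouge2 --task summarization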
| 561 | 1 |
"""Speech2Text processor class, bundling the feature extractor and tokenizer."""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backwards compatibility, forward everything to the current
        # processor while inside the `as_target_processor` context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer as the active processor, for preparing targets (labels)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
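# Minimal usage sketch (the checkpoint name and the `raw_audio` array are
# assumptions for illustration, not part of this module):
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=raw_audio, sampling_rate=16_000, text="a transcript")
#     # `inputs` then carries both the audio features and the tokenized labels.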
| 5 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_UpperCAmelCase : str = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
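# Hand-worked example of the sizing rule above: for a 480x640 (H x W) input and
# target size 384x384 with keep_aspect_ratio=True, the scale closer to 1 wins
# ("scale as little as possible"): scale_height = 0.8 beats scale_width = 0.6,
# so both sides use 0.8 and, with multiple=32, the output is (384, 512).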
class DPTImageProcessor(BaseImageProcessor):
    """Image processor for DPT: optional resizing to a multiple, rescaling and normalization."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
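# Minimal usage sketch (the image file is an assumption for illustration):
#
#     from PIL import Image
#
#     image_processor = DPTImageProcessor(size={"height": 384, "width": 384})
#     pixel_values = image_processor(images=Image.open("example.jpg"), return_tensors="pt").pixel_values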
| 295 | 0 |
import os
# Precomputes a list of the 100 first triangular numbers
SCREAMING_SNAKE_CASE : str = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    """Counts the words in words.txt whose letter-value sum is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordlist_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(wordlist_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    word_values = [
        value
        for value in [sum(ord(x) - 64 for x in word) for word in words]
        if value in TRIANGULAR_NUMBERS
    ]
    return len(word_values)
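# Worked example of the word value used above: "SKY" scores
# 19 + 11 + 25 = 55, which is the 10th triangular number (10 * 11 / 2),
# so it counts as a triangle word.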
if __name__ == "__main__":
print(solution())
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
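# Illustrative effect of the lazy module set up above: an import such as
#
#     from transformers.models.tapas import TapasModel
#
# only loads `modeling_tapas` (and its torch dependency) at that moment,
# not when the parent package is first imported.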
| 441 | 0 |
def hex_to_bin(hex_num: str) -> int:
    """Converts a (possibly negative) hexadecimal string to its binary digits, returned as an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
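# Example values (hand-checked): 0xAC is 172 decimal, i.e. 10101100 in binary,
# and the sign is carried through unchanged.
assert hex_to_bin("AC") == 10101100
assert hex_to_bin("-ac") == -10101100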
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Converts a PIL image, list of images or tensor to a [-1, 1] torch tensor of shape (N, C, H, W)."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherically interpolates between v0 and v1, falling back to lerp for near-parallel inputs."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
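# Minimal sketch of slerp on two orthogonal unit vectors (illustrative): the
# midpoint keeps unit norm, which plain linear interpolation would not.
#
#     v0 = np.array([1.0, 0.0])
#     v1 = np.array([0.0, 1.0])
#     mid = slerp(0.5, v0, v1)  # ~[0.7071, 0.7071], since sin(pi/4)/sin(pi/2) = 0.7071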
def spherical_dist_loss(x, y):
    """Squared geodesic distance between L2-normalized embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    """Stable diffusion pipeline that mixes two images, with optional CLIP guidance and CoCa captioning."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(num_images_per_prompt, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
@torch.enable_grad()
def cond_fn( self , latents , timestep , index , text_embeddings , noise_pred_original , text_embeddings_clip , clip_guidance_scale , ) -> Optional[int]:
    latents = latents.detach().requires_grad_()
    latent_model_input = self.scheduler.scale_model_input(latents , timestep )
    # predict the noise residual
    noise_pred = self.unet(latent_model_input , timestep , encoder_hidden_states=text_embeddings ).sample
    if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
        beta_prod_t = 1 - alpha_prod_t
        # compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
        fac = torch.sqrt(beta_prod_t )
        sample = pred_original_sample * (fac) + latents * (1 - fac)
    elif isinstance(self.scheduler , LMSDiscreteScheduler ):
        sigma = self.scheduler.sigmas[index]
        sample = latents - sigma * noise_pred
    else:
        raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
    # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
    sample = 1 / 0.18215 * sample
    image = self.vae.decode(sample ).sample
    image = (image / 2 + 0.5).clamp(0 , 1 )
    image = transforms.Resize(self.feature_extractor_size )(image )
    image = self.normalize(image ).to(latents.dtype )
    image_embeddings_clip = self.clip_model.get_image_features(image )
    image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
    loss = spherical_dist_loss(image_embeddings_clip , text_embeddings_clip ).mean() * clip_guidance_scale
    grads = -torch.autograd.grad(loss , latents )[0]
    if isinstance(self.scheduler , LMSDiscreteScheduler ):
        latents = latents.detach() + grads * (sigma**2)
        noise_pred = noise_pred_original
    else:
        noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
    return noise_pred, latents
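# Hedged sketch: `spherical_dist_loss` (used in cond_fn above) is not defined in this excerpt.
# The definition commonly used for CLIP guidance (assumed here; module level in the full file) is:
def spherical_dist_loss_sketch(x, y):
    """Squared great-circle distance between L2-normalized embeddings."""
    x = torch.nn.functional.normalize(x, dim=-1)
    y = torch.nn.functional.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)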
@torch.no_grad()
def __call__( self , style_image , content_image , style_prompt = None , content_prompt = None , height = 512 , width = 512 , noise_strength = 0.6 , num_inference_steps = 50 , guidance_scale = 7.5 , batch_size = 1 , eta = 0.0 , clip_guidance_scale = 100 , generator = None , output_type = "pil" , return_dict = True , slerp_latent_style_strength = 0.8 , slerp_prompt_style_strength = 0.1 , slerp_clip_image_style_strength = 0.1 , ) -> Dict:
    if isinstance(generator , list ) and len(generator ) != batch_size:
        raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(generator )} generators.''' )
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
    if isinstance(generator , torch.Generator ) and batch_size > 1:
        generator = [generator] + [None] * (batch_size - 1)
    coca_is_none = [
        ('''model''', self.coca_model is None),
        ('''tokenizer''', self.coca_tokenizer is None),
        ('''transform''', self.coca_transform is None),
    ]
    coca_is_none = [x[0] for x in coca_is_none if x[1]]
    coca_is_none_str = ''', '''.join(coca_is_none )
    # generate prompts with coca model if prompt is None
    if content_prompt is None:
        if len(coca_is_none ):
            raise ValueError(
                F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
        content_prompt = self.get_image_description(content_image )
    if style_prompt is None:
        if len(coca_is_none ):
            raise ValueError(
                F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
        style_prompt = self.get_image_description(style_image )
# get prompt text embeddings for content and style
    content_text_input = self.tokenizer(
        content_prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
    content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
    style_text_input = self.tokenizer(
        style_prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
    style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
    text_embeddings = slerp(slerp_prompt_style_strength , content_text_embeddings , style_text_embeddings )
    # duplicate text embeddings for each generation per prompt
    text_embeddings = text_embeddings.repeat_interleave(batch_size , dim=0 )
# set timesteps
    accepts_offset = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
    extra_set_kwargs = {}
    if accepts_offset:
        extra_set_kwargs['''offset'''] = 1
    self.scheduler.set_timesteps(num_inference_steps , **extra_set_kwargs )
    # Some schedulers like PNDM have timesteps as arrays
    # It's more optimized to move all timesteps to the correct device beforehand;
    # note that Tensor.to() is not in-place, so the result must be assigned back.
    self.scheduler.timesteps = self.scheduler.timesteps.to(self.device )
    timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , noise_strength , self.device )
    latent_timestep = timesteps[:1].repeat(batch_size )
    # Preprocess image
    preprocessed_content_image = preprocess(content_image , width , height )
    content_latents = self.prepare_latents(
        preprocessed_content_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
    preprocessed_style_image = preprocess(style_image , width , height )
    style_latents = self.prepare_latents(
        preprocessed_style_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
    latents = slerp(slerp_latent_style_strength , content_latents , style_latents )
    if clip_guidance_scale > 0:
        content_clip_image_embedding = self.get_clip_image_embeddings(content_image , batch_size )
        style_clip_image_embedding = self.get_clip_image_embeddings(style_image , batch_size )
        clip_image_embeddings = slerp(
            slerp_clip_image_style_strength , content_clip_image_embedding , style_clip_image_embedding )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
        max_length = content_text_input.input_ids.shape[-1]
        uncond_input = self.tokenizer([''''''] , padding='''max_length''' , max_length=max_length , return_tensors='''pt''' )
        uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
        # duplicate unconditional embeddings for each generation per prompt
        uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
        text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
    latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
    latents_dtype = text_embeddings.dtype
    if latents is None:
        if self.device.type == "mps":
            # randn does not work reproducibly on mps
            latents = torch.randn(latents_shape , generator=generator , device='''cpu''' , dtype=latents_dtype ).to(
                self.device )
        else:
            latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
    else:
        if latents.shape != latents_shape:
            raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
        latents = latents.to(self.device )
    # scale the initial noise by the standard deviation required by the scheduler
    latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
    accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
    extra_step_kwargs = {}
    if accepts_eta:
        extra_step_kwargs['''eta'''] = eta
    # check if the scheduler accepts generator
    accepts_generator = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
    if accepts_generator:
        extra_step_kwargs['''generator'''] = generator
    with self.progress_bar(total=num_inference_steps ):
        for i, t in enumerate(timesteps ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
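                # Worked example: with guidance_scale=7.5 the line above computes
                # noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond), i.e. the
                # text-conditional direction amplified 7.5x relative to the unconditional
                # prediction; guidance_scale=1 disables classifier-free guidance entirely.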
# perform clip guidance
if clip_guidance_scale > 0:
                text_embeddings_for_guidance = (
                    text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                )
                noise_pred , latents = self.cond_fn(
                    latents , t , i , text_embeddings_for_guidance , noise_pred , clip_image_embeddings , clip_guidance_scale , )
# compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
    # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
    latents = 1 / 0.18215 * latents
    image = self.vae.decode(latents ).sample
    image = (image / 2 + 0.5).clamp(0 , 1 )
    image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
    if output_type == "pil":
        image = self.numpy_to_pil(image )
if not return_dict:
return (image, None)
    return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
| 658 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
    model_type = "perceiver"
def __init__( self : List[Any] , a_ : Tuple=256 , a_ : List[str]=1280 , a_ : List[Any]=768 , a_ : List[str]=1 , a_ : str=26 , a_ : Tuple=8 , a_ : Any=8 , a_ : str=None , a_ : Dict=None , a_ : int="kv" , a_ : int=1 , a_ : Dict=1 , a_ : str="gelu" , a_ : Any=0.1 , a_ : Any=0.02 , a_ : Optional[int]=1e-1_2 , a_ : List[str]=True , a_ : Dict=262 , a_ : Any=2048 , a_ : int=56 , a_ : List[str]=[368, 496] , a_ : List[Any]=16 , a_ : int=1920 , a_ : List[Any]=16 , a_ : Optional[int]=[1, 16, 224, 224] , **a_ : Union[str, Any] , )-> Any:
"""simple docstring"""
super().__init__(**A_ )
SCREAMING_SNAKE_CASE__ : Tuple = num_latents
SCREAMING_SNAKE_CASE__ : int = d_latents
SCREAMING_SNAKE_CASE__ : List[str] = d_model
SCREAMING_SNAKE_CASE__ : str = num_blocks
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE__ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = num_cross_attention_heads
SCREAMING_SNAKE_CASE__ : int = qk_channels
SCREAMING_SNAKE_CASE__ : str = v_channels
SCREAMING_SNAKE_CASE__ : Tuple = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE__ : Optional[int] = self_attention_widening_factor
SCREAMING_SNAKE_CASE__ : List[Any] = cross_attention_widening_factor
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE__ : int = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE__ : Tuple = image_size
# flow attributes
SCREAMING_SNAKE_CASE__ : Optional[int] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE__ : List[str] = num_frames
SCREAMING_SNAKE_CASE__ : int = audio_samples_per_frame
SCREAMING_SNAKE_CASE__ : str = samples_per_patch
SCREAMING_SNAKE_CASE__ : Any = output_shape
class PerceiverOnnxConfig( OnnxConfig ):
    @property
    def inputs( self )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
    @property
    def atol_for_validation( self )-> float:
"""simple docstring"""
return 1e-4
    def generate_dummy_inputs( self , preprocessor , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , )-> Mapping[str, Any]:
"""simple docstring"""
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a'] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
 | 703 | def _a ( input_a : int , input_b : int ):
'''simple docstring'''
return int((input_a, input_b).count(0 ) != 0 )
def _a ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 636 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    """simple docstring"""
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , vqvae , unet , scheduler , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image = None , batch_size = 1 , num_inference_steps = 1_00 , eta = 0.0 , generator = None , output_type = "pil" , return_dict = True , ):
        '''simple docstring'''
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}''' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["""eta"""] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=image )
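# A minimal usage sketch (assuming this is diffusers' LDMSuperResolutionPipeline and that the
# "CompVis/ldm-super-resolution-4x-openimages" checkpoint is available):
#
#   from diffusers import LDMSuperResolutionPipeline
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(low_res_image, num_inference_steps=100, eta=1.0).images[0]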
| 577 |
'''simple docstring'''
import os
def solution():
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
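    # A product spans four adjacent cells (indices i..i+3), so every loop that advances in a
    # given direction stops at 20 - 4 + 1 = 17; the "diagonal 2" loop additionally starts j at 3
    # so that j - 3 stays in range.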
# right
for i in range(20 ):
for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
# down
for i in range(17 ):
for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
return maximum
if __name__ == "__main__":
print(solution())
| 577 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase =logging.get_logger(__name__)
# General docstring
lowerCamelCase ="""RegNetConfig"""
# Base docstring
lowerCamelCase ="""facebook/regnet-y-040"""
lowerCamelCase =[1, 1_0_8_8, 7, 7]
# Image classification docstring
lowerCamelCase ="""facebook/regnet-y-040"""
lowerCamelCase ="""tabby, tabby cat"""
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST =[
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer( tf.keras.layers.Layer ):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = "relu" , **__SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
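        # ZeroPadding2D(kernel_size // 2) followed by a VALID convolution reproduces PyTorch's
        # Conv2d(padding=kernel_size // 2), i.e. a "same"-sized output for odd kernels at stride 1.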
UpperCamelCase__ : Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCamelCase__ : List[str] = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=_lowerCAmelCase , strides=_lowerCAmelCase , padding='''VALID''' , groups=_lowerCAmelCase , use_bias=_lowerCAmelCase , name='''convolution''' , )
UpperCamelCase__ : List[str] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
UpperCamelCase__ : List[Any] = ACTaFN[activation] if activation is not None else tf.identity
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.convolution(self.padding(_lowerCAmelCase ) )
UpperCamelCase__ : List[str] = self.normalization(_lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = self.activation(_lowerCAmelCase )
return hidden_state
class TFRegNetEmbeddings( tf.keras.layers.Layer ):
def __init__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
UpperCamelCase__ : List[Any] = config.num_channels
UpperCamelCase__ : Union[str, Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : int = shape_list(_lowerCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase__ : Optional[int] = tf.transpose(_lowerCAmelCase , perm=(0, 2, 3, 1) )
UpperCamelCase__ : List[Any] = self.embedder(_lowerCAmelCase )
return hidden_state
class TFRegNetShortCut( tf.keras.layers.Layer ):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 , **__SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
UpperCamelCase__ : List[str] = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=1 , strides=_lowerCAmelCase , use_bias=_lowerCAmelCase , name='''convolution''' )
UpperCamelCase__ : str = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> tf.Tensor:
"""simple docstring"""
return self.normalization(self.convolution(_lowerCAmelCase ) , training=_lowerCAmelCase )
class TFRegNetSELayer( tf.keras.layers.Layer ):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
UpperCamelCase__ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name='''pooler''' )
UpperCamelCase__ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.pooler(_lowerCAmelCase )
for layer_module in self.attention:
UpperCamelCase__ : Any = layer_module(_lowerCAmelCase )
UpperCamelCase__ : List[str] = hidden_state * pooled
return hidden_state
class TFRegNetXLayer( tf.keras.layers.Layer ):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 , **__SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
UpperCamelCase__ : Optional[int] = in_channels != out_channels or stride != 1
UpperCamelCase__ : Optional[Any] = max(1 , out_channels // config.groups_width )
UpperCamelCase__ : Any = (
TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase__ : int = [
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name='''layer.2''' ),
]
UpperCamelCase__ : str = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = hidden_state
for layer_module in self.layers:
UpperCamelCase__ : Optional[Any] = layer_module(_lowerCAmelCase )
UpperCamelCase__ : Any = self.shortcut(_lowerCAmelCase )
hidden_state += residual
UpperCamelCase__ : List[str] = self.activation(_lowerCAmelCase )
return hidden_state
class TFRegNetYLayer( tf.keras.layers.Layer ):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 , **__SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
UpperCamelCase__ : Optional[int] = in_channels != out_channels or stride != 1
UpperCamelCase__ : Union[str, Any] = max(1 , out_channels // config.groups_width )
UpperCamelCase__ : int = (
TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
UpperCamelCase__ : Optional[Any] = [
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name='''layer.3''' ),
]
UpperCamelCase__ : List[Any] = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = hidden_state
for layer_module in self.layers:
UpperCamelCase__ : List[Any] = layer_module(_lowerCAmelCase )
UpperCamelCase__ : Optional[int] = self.shortcut(_lowerCAmelCase )
hidden_state += residual
UpperCamelCase__ : List[Any] = self.activation(_lowerCAmelCase )
return hidden_state
class TFRegNetStage( tf.keras.layers.Layer ):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 2 , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
UpperCamelCase__ : Any = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
UpperCamelCase__ : Optional[Any] = [
# downsampling is done in the first layer with stride of 2
layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase , name='''layers.0''' ),
*[layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
for layer_module in self.layers:
UpperCamelCase__ : str = layer_module(_lowerCAmelCase )
return hidden_state
class TFRegNetEncoder( tf.keras.layers.Layer ):
def __init__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
UpperCamelCase__ : List[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
UpperCamelCase__ : List[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_lowerCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , depth=_lowerCAmelCase , name=F'''stages.{i+1}''' ) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True ) -> TFBaseModelOutputWithNoAttention:
"""simple docstring"""
UpperCamelCase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase__ : int = hidden_states + (hidden_state,)
UpperCamelCase__ : List[Any] = stage_module(_lowerCAmelCase )
if output_hidden_states:
UpperCamelCase__ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_lowerCAmelCase , hidden_states=_lowerCAmelCase )
@keras_serializable
class TFRegNetMainLayer( tf.keras.layers.Layer ):
    config_class = RegNetConfig
def __init__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = config
UpperCamelCase__ : str = TFRegNetEmbeddings(_lowerCAmelCase , name='''embedder''' )
UpperCamelCase__ : Any = TFRegNetEncoder(_lowerCAmelCase , name='''encoder''' )
UpperCamelCase__ : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name='''pooler''' )
@unpack_inputs
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
UpperCamelCase__ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ : str = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ : Optional[Any] = self.embedder(_lowerCAmelCase , training=_lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = self.encoder(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase )
UpperCamelCase__ : Any = encoder_outputs[0]
UpperCamelCase__ : Tuple = self.pooler(_lowerCAmelCase )
# Change to NCHW output format have uniformity in the modules
UpperCamelCase__ : List[str] = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) )
UpperCamelCase__ : str = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase__ : str = tuple([tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCAmelCase , pooler_output=_lowerCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel( TFPreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
lowerCamelCase =r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase =r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , REGNET_START_DOCSTRING , )
class TFRegNetModel( TFRegNetPreTrainedModel ):
def __init__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
UpperCamelCase__ : int = TFRegNetMainLayer(_lowerCAmelCase , name='''regnet''' )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
"""simple docstring"""
UpperCamelCase__ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ : Dict = self.regnet(
pixel_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '''\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ''' , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
def __init__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
UpperCamelCase__ : Any = config.num_labels
UpperCamelCase__ : List[Any] = TFRegNetMainLayer(_lowerCAmelCase , name='''regnet''' )
# classification head
UpperCamelCase__ : Any = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
"""simple docstring"""
UpperCamelCase__ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ : Tuple = self.regnet(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase__ : Dict = self.classifier[0](_lowerCAmelCase )
UpperCamelCase__ : Optional[int] = self.classifier[1](_lowerCAmelCase )
UpperCamelCase__ : List[str] = None if labels is None else self.hf_compute_loss(labels=_lowerCAmelCase , logits=_lowerCAmelCase )
if not return_dict:
UpperCamelCase__ : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_lowerCAmelCase , logits=_lowerCAmelCase , hidden_states=outputs.hidden_states )
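# A minimal usage sketch (assuming this file mirrors transformers' TF RegNet implementation):
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   predicted_label = int(tf.math.argmax(model(**inputs).logits, axis=-1))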
| 709 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char( cp ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e_00 and cp <= 0x9f_ff)
or (cp >= 0x34_00 and cp <= 0x4d_bf) #
or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) #
or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) #
or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) #
or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) #
or (cp >= 0xf9_00 and cp <= 0xfa_ff)
or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) #
): #
return True
return False
def is_chinese( word ):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
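# Illustrative examples: is_chinese("身高") == 1 (every character is CJK), while
# is_chinese("180") == 0 and is_chinese("身高180") == 0 (any non-CJK character disqualifies the word).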
def get_chinese_word( tokens ):
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens , chinese_word_set ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )  # noqa: E741
            for i in range(l , 1 , -1 ):
                whole_word = ''''''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '''##''' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
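# Illustrative example (hypothetical input): with bert_tokens ["你", "好", "吗"] and
# chinese_word_set {"你好"}, add_sub_symbol returns ["你", "##好", "吗"] -- "好" is marked as a
# subword continuation of the whole word "你好", which is what whole-word masking needs.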
def prepare_ref( lines , ltp_tokenizer , bert_tokenizer ):
    ltp_res = []
    for i in range(0 , len(lines ) , 1_0_0 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['''cws'''] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 1_0_0 ):
        res = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=True , truncation=True , max_length=5_1_2 )
        bert_res.extend(res['''input_ids'''] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main( args ):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
        data = [json.dumps(ref ) + '''\n''' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
lowerCamelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
lowerCamelCase =parser.parse_args()
main(args)
| 462 | 0 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline( ChunkPipeline ):
"""simple docstring"""
def __init__( self : Dict , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters(self : List[Any] , **kwargs : Union[str, Any] ) -> List[str]:
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : str , *SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : str ) -> Dict:
return super().__call__(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , num_workers=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
    def preprocess( self , image , points_per_batch=64 , crops_n_layers : int = 0 , crop_overlap_ratio : float = 512 / 1_500 , points_per_crop : Optional[int] = 32 , crop_n_points_downscale_factor : Optional[int] = 1 , ) -> Union[str, Any]:
        image = load_image(image )
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors="pt" )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None" )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ) -> Tuple:
        input_boxes = model_inputs.pop("input_boxes" )
        is_last = model_inputs.pop("is_last" )
        original_sizes = model_inputs.pop("original_sizes" ).tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes" ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs["iou_scores"]
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ) -> List[Any]:
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores" ) )
            all_masks.extend(model_output.pop("masks" ) )
            all_boxes.append(model_output.pop("boxes" ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 61 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features : Features ):
    '''simple docstring'''
    batch_size = np.inf
    def set_batch_size(feature : FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
return None if batch_size is np.inf else batch_size
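# For example, a dataset with features {"image": Image(), "label": Value("int64")} gets the
# (smaller) image row-group size, while a dataset of plain scalar columns returns None here and
# falls back to the writer's default batch size.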
class ParquetDatasetReader( AbstractDatasetReader ):
'''simple docstring'''
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self ):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
class ParquetDatasetWriter:
'''simple docstring'''
    def __init__( self , dataset , path_or_buf , batch_size = None , **parquet_writer_kwargs , ):
        '''simple docstring'''
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self ):
        '''simple docstring'''
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , 'wb+' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self , file_obj , batch_size , **parquet_writer_kwargs ):
        '''simple docstring'''
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
return written | 329 | 0 |
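
# The tail of `get_writer_batch_size` above shrinks the parquet row-group
# size for binary-heavy columns. A hedged, self-contained sketch of that
# heuristic -- the row counts are hypothetical stand-ins for the config
# constants, not the library's real values:
import numpy as np

def _writer_batch_size_sketch(has_image: bool, has_audio: bool,
                              image_rows: int = 1000, audio_rows: int = 100):
    batch_size = np.inf
    if has_image:
        batch_size = min(batch_size, image_rows)
    if has_audio:
        batch_size = min(batch_size, audio_rows)
    return None if batch_size is np.inf else batch_size

assert _writer_batch_size_sketch(False, False) is None
assert _writer_batch_size_sketch(True, True) == 100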
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 719 |
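
# A hedged follow-up to the example above: seeding a torch.Generator makes
# the sampled image deterministic for a fixed model and settings (left as
# comments because the model path above is only a placeholder):
# generator = torch.Generator("cuda").manual_seed(0)
# image = pipe(prompt, generator=generator, num_inference_steps=50, guidance_scale=7.5).images[0]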
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 211 | 0 |
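
# A minimal, self-contained sketch of the dummy-object pattern used above
# (simplified: the real DummyObject metaclass routes through
# requires_backends; `DemoPipeline` is a hypothetical name for illustration):
class _DemoDummyMeta(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the flax and transformers backends.")

class DemoPipeline(metaclass=_DemoDummyMeta):
    pass

try:
    DemoPipeline()
except ImportError as err:
    print(err)  # DemoPipeline requires the flax and transformers backends.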
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Optional[int] = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
A_ : Any = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowercase ) , x.transpose() ) )
A_ : List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _a (self ):
A_ : Optional[int] = np.random.randn(3 , 4 )
A_ : str = torch.tensor(lowercase )
self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) )
A_ : Tuple = np.random.randn(3 , 4 , 5 )
A_ : int = torch.tensor(lowercase )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _a (self ):
A_ : str = np.random.randn(3 , 4 )
A_ : int = tf.constant(lowercase )
self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) )
A_ : Any = np.random.randn(3 , 4 , 5 )
A_ : List[str] = tf.constant(lowercase )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _a (self ):
A_ : Optional[int] = np.random.randn(3 , 4 )
A_ : Optional[Any] = jnp.array(lowercase )
self.assertTrue(np.allclose(transpose(lowercase ) , np.asarray(transpose(lowercase ) ) ) )
A_ : str = np.random.randn(3 , 4 , 5 )
A_ : str = jnp.array(lowercase )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , np.asarray(transpose(lowercase , axes=(1, 2, 0) ) ) ) )
def _a (self ):
A_ : str = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.reshape(lowercase , (4, 3) ) ) )
A_ : int = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , np.reshape(lowercase , (12, 5) ) ) )
@require_torch
def _a (self ):
A_ : Optional[Any] = np.random.randn(3 , 4 )
A_ : List[Any] = torch.tensor(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) )
A_ : Optional[int] = np.random.randn(3 , 4 , 5 )
A_ : int = torch.tensor(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , reshape(lowercase , (12, 5) ).numpy() ) )
@require_tf
def _a (self ):
A_ : Optional[Any] = np.random.randn(3 , 4 )
A_ : List[Any] = tf.constant(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) )
A_ : Optional[int] = np.random.randn(3 , 4 , 5 )
A_ : int = tf.constant(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , reshape(lowercase , (12, 5) ).numpy() ) )
@require_flax
def _a (self ):
A_ : Dict = np.random.randn(3 , 4 )
A_ : str = jnp.array(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.asarray(reshape(lowercase , (4, 3) ) ) ) )
A_ : Optional[int] = np.random.randn(3 , 4 , 5 )
A_ : List[Any] = jnp.array(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , np.asarray(reshape(lowercase , (12, 5) ) ) ) )
def _a (self ):
A_ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowercase ) , np.squeeze(lowercase ) ) )
A_ : List[str] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.squeeze(lowercase , axis=2 ) ) )
@require_torch
def _a (self ):
A_ : List[str] = np.random.randn(1 , 3 , 4 )
A_ : Any = torch.tensor(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) )
A_ : str = np.random.randn(1 , 4 , 1 , 5 )
A_ : Optional[int] = torch.tensor(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() ) )
@require_tf
def _a (self ):
A_ : Tuple = np.random.randn(1 , 3 , 4 )
A_ : List[Any] = tf.constant(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) )
A_ : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
A_ : Dict = tf.constant(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() ) )
@require_flax
def _a (self ):
A_ : Tuple = np.random.randn(1 , 3 , 4 )
A_ : Dict = jnp.array(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase ) , np.asarray(squeeze(lowercase ) ) ) )
A_ : str = np.random.randn(1 , 4 , 1 , 5 )
A_ : str = jnp.array(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.asarray(squeeze(lowercase , axis=2 ) ) ) )
def _a (self ):
A_ : List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.expand_dims(lowercase , axis=1 ) ) )
@require_torch
def _a (self ):
A_ : Tuple = np.random.randn(3 , 4 )
A_ : str = torch.tensor(lowercase )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) )
@require_tf
def _a (self ):
A_ : List[Any] = np.random.randn(3 , 4 )
A_ : List[Any] = tf.constant(lowercase )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) )
@require_flax
def _a (self ):
A_ : Any = np.random.randn(3 , 4 )
A_ : Any = jnp.array(lowercase )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.asarray(expand_dims(lowercase , axis=1 ) ) ) ) | 667 |
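
# The tests above check that the framework-agnostic helpers agree with their
# numpy counterparts across numpy, torch, tf and jax. A quick numpy-only
# illustration of the same invariants (arbitrary shapes):
import numpy as np

x = np.random.randn(3, 4, 5)
assert np.allclose(x.transpose((1, 2, 0)), np.transpose(x, axes=(1, 2, 0)))
assert np.allclose(x.reshape(12, 5), np.reshape(x, (12, 5)))
assert np.squeeze(x[np.newaxis]).shape == (3, 4, 5)
assert np.expand_dims(x, axis=1).shape == (3, 1, 4, 5)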
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range | 667 | 1 |
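
# Illustrative use of a config like the one above, via the public
# transformers export (values arbitrary; assumes transformers is installed):
from transformers import MgpstrConfig

config = MgpstrConfig(hidden_size=384)
assert config.to_dict()["hidden_size"] == 384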
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase__ ( _A , _A=0.9_99 , _A="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_A ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_A ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
a_ = []
for i in range(_A ):
a_ = i / num_diffusion_timesteps
a_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ) , _A ) )
    return torch.tensor(_A , dtype=torch.float32 )
class __lowercase ( a__ , a__ ):
_lowerCAmelCase = [e.name for e in KarrasDiffusionSchedulers]
_lowerCAmelCase = 2
@register_to_config
def __init__( self : int , lowercase__ : int = 1_0_0_0 , lowercase__ : float = 0.0_0085 , lowercase__ : float = 0.012 , lowercase__ : str = "linear" , lowercase__ : Optional[Union[np.ndarray, List[float]]] = None , lowercase__ : str = "epsilon" , lowercase__ : Optional[bool] = False , lowercase__ : Optional[bool] = False , lowercase__ : float = 1.0 , lowercase__ : str = "linspace" , lowercase__ : int = 0 , ):
if trained_betas is not None:
            a_ = torch.tensor(lowercase__ , dtype=torch.float32 )
elif beta_schedule == "linear":
            a_ = torch.linspace(lowercase__ , lowercase__ , lowercase__ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a_ = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , lowercase__ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a_ = betas_for_alpha_bar(lowercase__ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
a_ = betas_for_alpha_bar(lowercase__ , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
a_ = 1.0 - self.betas
a_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowercase__ , lowercase__ , lowercase__ )
a_ = use_karras_sigmas
def __magic_name__ ( self : List[str] , lowercase__ : Tuple , lowercase__ : str=None ):
if schedule_timesteps is None:
a_ = self.timesteps
a_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
a_ = 1 if len(lowercase__ ) > 1 else 0
else:
a_ = timestep.cpu().item() if torch.is_tensor(lowercase__ ) else timestep
a_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __magic_name__ ( self : Union[str, Any] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __magic_name__ ( self : Dict , lowercase__ : torch.FloatTensor , lowercase__ : Union[float, torch.FloatTensor] , ):
a_ = self.index_for_timestep(lowercase__ )
a_ = self.sigmas[step_index]
a_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __magic_name__ ( self : int , lowercase__ : int , lowercase__ : Union[str, torch.device] = None , lowercase__ : Optional[int] = None , ):
a_ = num_inference_steps
a_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
a_ = np.linspace(0 , num_train_timesteps - 1 , lowercase__ , dtype=lowercase__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
a_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a_ = (np.arange(0 , lowercase__ ) * step_ratio).round()[::-1].copy().astype(lowercase__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
a_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a_ = (np.arange(lowercase__ , 0 , -step_ratio )).round().copy().astype(lowercase__ )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
a_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
a_ = np.log(lowercase__ )
a_ = np.interp(lowercase__ , np.arange(0 , len(lowercase__ ) ) , lowercase__ )
if self.config.use_karras_sigmas:
a_ = self._convert_to_karras(in_sigmas=lowercase__ , num_inference_steps=self.num_inference_steps )
a_ = np.array([self._sigma_to_t(lowercase__ , lowercase__ ) for sigma in sigmas] )
        a_ = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
a_ = torch.from_numpy(lowercase__ ).to(device=lowercase__ )
a_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
a_ = torch.from_numpy(lowercase__ )
a_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowercase__ ).startswith('''mps''' ):
# mps does not support float64
            a_ = timesteps.to(lowercase__ , dtype=torch.float32 )
else:
a_ = timesteps.to(device=lowercase__ )
# empty dt and derivative
a_ = None
a_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
a_ = defaultdict(lowercase__ )
def __magic_name__ ( self : Optional[int] , lowercase__ : List[Any] , lowercase__ : Optional[int] ):
# get log sigma
a_ = np.log(lowercase__ )
# get distribution
a_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
a_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
a_ = low_idx + 1
a_ = log_sigmas[low_idx]
a_ = log_sigmas[high_idx]
# interpolate sigmas
a_ = (low - log_sigma) / (low - high)
a_ = np.clip(lowercase__ , 0 , 1 )
# transform interpolation to time range
a_ = (1 - w) * low_idx + w * high_idx
a_ = t.reshape(sigma.shape )
return t
def __magic_name__ ( self : List[str] , lowercase__ : torch.FloatTensor , lowercase__ : Tuple ):
a_ = in_sigmas[-1].item()
a_ = in_sigmas[0].item()
a_ = 7.0 # 7.0 is the value used in the paper
a_ = np.linspace(0 , 1 , lowercase__ )
a_ = sigma_min ** (1 / rho)
a_ = sigma_max ** (1 / rho)
a_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __magic_name__ ( self : Optional[int] ):
return self.dt is None
def __magic_name__ ( self : Union[str, Any] , lowercase__ : Union[torch.FloatTensor, np.ndarray] , lowercase__ : Union[float, torch.FloatTensor] , lowercase__ : Union[torch.FloatTensor, np.ndarray] , lowercase__ : bool = True , ):
a_ = self.index_for_timestep(lowercase__ )
# advance index counter by 1
a_ = timestep.cpu().item() if torch.is_tensor(lowercase__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
a_ = self.sigmas[step_index]
a_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
a_ = self.sigmas[step_index - 1]
a_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
a_ = 0
a_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
a_ = sigma_hat if self.state_in_first_order else sigma_next
a_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
a_ = sigma_hat if self.state_in_first_order else sigma_next
a_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
a_ = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
a_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
a_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
a_ = sigma_next - sigma_hat
# store for 2nd order step
a_ = derivative
a_ = dt
a_ = sample
else:
# 2. 2nd order / Heun's method
a_ = (sample - pred_original_sample) / sigma_next
a_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
a_ = self.dt
a_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
a_ = None
a_ = None
a_ = None
a_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase__ )
def __magic_name__ ( self : Optional[Any] , lowercase__ : torch.FloatTensor , lowercase__ : torch.FloatTensor , lowercase__ : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
a_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowercase__ ):
# mps does not support float64
            a_ = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            a_ = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
a_ = self.timesteps.to(original_samples.device )
a_ = timesteps.to(original_samples.device )
a_ = [self.index_for_timestep(lowercase__ , lowercase__ ) for t in timesteps]
a_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
a_ = sigma.unsqueeze(-1 )
a_ = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[Any] ):
return self.config.num_train_timesteps
| 143 |
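
# The scheduler above mirrors diffusers' HeunDiscreteScheduler (note the
# two-phase first-order/second-order `step`). A hedged denoising-loop sketch
# using the public class; `torch.zeros_like` stands in for a real
# noise-prediction model such as a UNet:
import torch
from diffusers import HeunDiscreteScheduler

sched = HeunDiscreteScheduler(num_train_timesteps=1000)
sched.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8) * sched.init_noise_sigma
for t in sched.timesteps:
    model_input = sched.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for unet(model_input, t).sample
    sample = sched.step(noise_pred, t, sample).prev_sample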
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Build the list in ascending order by prepending the values
        # from largest to smallest.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 143 | 1 |
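
# Worked example of the merge above (values arbitrary): each input is kept
# sorted on construction, so merging reduces to re-sorting the concatenation.
merged = merge_lists(SortedLinkedList([2, 0]), SortedLinkedList([1, 3]))
assert str(merged) == "0 -> 1 -> 2 -> 3"
assert len(merged) == 4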
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( a__ , unittest.TestCase):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase):
@property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Any = ort.SessionOptions()
lowerCamelCase : str = False
return options
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
lowerCamelCase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
lowerCamelCase : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting', revision='onnx', safety_checker=A, feature_extractor=A, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase : Dict = 'A red cat sitting on a park bench'
lowerCamelCase : str = np.random.RandomState(0 )
lowerCamelCase : Optional[Any] = pipe(
prompt=A, image=A, mask_image=A, guidance_scale=7.5, num_inference_steps=10, generator=A, output_type='np', )
lowerCamelCase : Tuple = output.images
lowerCamelCase : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase : Optional[Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
lowerCamelCase : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
lowerCamelCase : List[Any] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting', subfolder='scheduler', revision='onnx' )
lowerCamelCase : Optional[int] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting', revision='onnx', scheduler=A, safety_checker=A, feature_extractor=A, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase : Any = 'A red cat sitting on a park bench'
lowerCamelCase : Any = np.random.RandomState(0 )
lowerCamelCase : Any = pipe(
prompt=A, image=A, mask_image=A, guidance_scale=7.5, num_inference_steps=20, generator=A, output_type='np', )
lowerCamelCase : int = output.images
lowerCamelCase : List[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase : Optional[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 320 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self, A, A=13, A=32, A=2, A=3, A=16, A=[1, 2, 1], A=[2, 2, 4], A=2, A=2.0, A=True, A=0.0, A=0.0, A=0.1, A="gelu", A=False, A=True, A=0.02, A=1e-5, A=True, A=None, A=True, A=10, A=8, ):
"""simple docstring"""
lowerCamelCase : int = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : List[Any] = image_size
lowerCamelCase : List[Any] = patch_size
lowerCamelCase : List[Any] = num_channels
lowerCamelCase : Tuple = embed_dim
lowerCamelCase : Dict = depths
lowerCamelCase : Optional[Any] = num_heads
lowerCamelCase : Tuple = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : List[str] = qkv_bias
lowerCamelCase : Optional[Any] = hidden_dropout_prob
lowerCamelCase : Any = attention_probs_dropout_prob
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Optional[int] = use_absolute_embeddings
lowerCamelCase : Dict = patch_norm
lowerCamelCase : Union[str, Any] = layer_norm_eps
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : Optional[int] = scope
lowerCamelCase : Any = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : Optional[Any] = encoder_stride
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def UpperCAmelCase_ ( self, A, A, A ):
"""simple docstring"""
lowerCamelCase : int = SwinvaModel(config=A )
model.to(A )
model.eval()
lowerCamelCase : str = model(A )
lowerCamelCase : List[str] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase_ ( self, A, A, A ):
"""simple docstring"""
lowerCamelCase : Tuple = SwinvaForMaskedImageModeling(config=A )
model.to(A )
model.eval()
lowerCamelCase : Tuple = model(A )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[str] = SwinvaForMaskedImageModeling(A )
model.to(A )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : List[Any] = model(A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self, A, A, A ):
"""simple docstring"""
lowerCamelCase : List[str] = self.type_sequence_label_size
lowerCamelCase : int = SwinvaForImageClassification(A )
model.to(A )
model.eval()
lowerCamelCase : List[str] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( a__ , a__ , unittest.TestCase):
_lowerCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_lowerCAmelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = SwinvaModelTester(self )
lowerCamelCase : Union[str, Any] = ConfigTester(self, config_class=A, embed_dim=37 )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A, nn.Linear ) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = model_class(A )
lowerCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Dict = [*signature.parameters.keys()]
lowerCamelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = True
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = True
lowerCamelCase : List[Any] = False
lowerCamelCase : str = True
lowerCamelCase : Union[str, Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(A, A ) )
lowerCamelCase : Tuple = outputs.attentions
lowerCamelCase : Union[str, Any] = len(self.model_tester.depths )
self.assertEqual(len(A ), A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase : List[str] = True
lowerCamelCase : int = config.window_size**2
lowerCamelCase : str = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase : Any = model(**self._prepare_for_class(A, A ) )
lowerCamelCase : str = outputs.attentions
self.assertEqual(len(A ), A )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
lowerCamelCase : Optional[Any] = len(A )
# Check attention is always last and order is fine
lowerCamelCase : List[str] = True
lowerCamelCase : Any = True
lowerCamelCase : Tuple = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(A, A ) )
if hasattr(self.model_tester, 'num_hidden_states_types' ):
lowerCamelCase : Optional[int] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCamelCase : Tuple = 2
self.assertEqual(out_len + added_hidden_states, len(A ) )
lowerCamelCase : int = outputs.attentions
self.assertEqual(len(A ), A )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
def UpperCAmelCase_ ( self, A, A, A, A ):
"""simple docstring"""
lowerCamelCase : int = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**self._prepare_for_class(A, A ) )
lowerCamelCase : Dict = outputs.hidden_states
lowerCamelCase : List[Any] = getattr(
self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A ), A )
# Swinv2 has a different seq_length
lowerCamelCase : Any = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
lowerCamelCase : List[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(A ), A )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
lowerCamelCase : Union[str, Any] = (
reshaped_hidden_states[0].view(A, A, height * width ).permute(0, 2, 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCamelCase : str = True
self.check_hidden_states_output(A, A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Dict = True
self.check_hidden_states_output(A, A, A, A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[Any] = 3
lowerCamelCase : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : str = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCamelCase : Tuple = True
self.check_hidden_states_output(A, A, A, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[str] = True
self.check_hidden_states_output(A, A, A, (padded_height, padded_width) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = SwinvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = _config_zero_init(A )
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(config=A )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@require_vision
@require_torch
class __snake_case ( unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
A )
lowerCamelCase : Union[str, Any] = self.default_image_processor
lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**A )
# verify the logits
lowerCamelCase : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, A )
lowerCamelCase : List[Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1e-4 ) )
| 320 | 1 |
"""simple docstring"""
import os
import sys
import unittest
lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCAmelCase_ = os.path.join(git_repo_path, '''src''', '''transformers''')
lowerCAmelCase_ = "\n{0} = None\n"
lowerCAmelCase_ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
lowerCAmelCase_ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""")
self.assertIsNone(A_)
_SCREAMING_SNAKE_CASE : Optional[int] = find_backend(""" if not is_tokenizers_available():""")
self.assertEqual(A_ , """tokenizers""")
_SCREAMING_SNAKE_CASE : Optional[Any] = find_backend(""" if not is_tensorflow_text_available():""")
self.assertEqual(A_ , """tensorflow_text""")
_SCREAMING_SNAKE_CASE : Union[str, Any] = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""")
self.assertEqual(A_ , """sentencepiece_and_tokenizers""")
_SCREAMING_SNAKE_CASE : int = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""")
self.assertEqual(A_ , """sentencepiece_and_tensorflow_text""")
_SCREAMING_SNAKE_CASE : List[Any] = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""")
self.assertEqual(A_ , """sentencepiece_and_tokenizers_and_vision""")
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , A_)
self.assertIn("""tensorflow_text""" , A_)
self.assertIn("""sentencepiece_and_tokenizers""" , A_)
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""])
self.assertIn("""TFBertModel""" , objects["""tf"""])
self.assertIn("""FlaxBertModel""" , objects["""flax"""])
self.assertIn("""BertModel""" , objects["""torch"""])
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""])
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""])
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = create_dummy_object("""CONSTANT""" , """\'torch\'""")
self.assertEqual(A_ , """\nCONSTANT = None\n""")
_SCREAMING_SNAKE_CASE : List[str] = create_dummy_object("""function""" , """\'torch\'""")
self.assertEqual(
A_ , """\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n""")
_SCREAMING_SNAKE_CASE : Optional[int] = """
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = create_dummy_object("""FakeClass""" , """\'torch\'""")
self.assertEqual(A_ , A_)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]})
self.assertEqual(dummy_files["""torch"""] , A_)
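
# Hedged sketch of the backend-detection idea these tests exercise: a guard
# line like `if not (is_X_available() and is_Y_available()):` maps to the
# compound backend key "X_and_Y" (simplified re-implementation, not the
# real check_dummies.find_backend):
import re

def _find_backend_sketch(line: str):
    hits = re.findall(r"is_(\w+)_available\(\)", line)
    return "_and_".join(hits) if hits else None

assert _find_backend_sketch("    if not is_tokenizers_available():") == "tokenizers"
assert _find_backend_sketch(
    "    if not (is_sentencepiece_available() and is_tokenizers_available()):"
) == "sentencepiece_and_tokenizers"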
| 705 | """simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : int = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE : List[Any] = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str:
if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = parquet_path
elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
_SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for split in splits:
_SCREAMING_SNAKE_CASE : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
_SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
if split:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path}
else:
_SCREAMING_SNAKE_CASE : Optional[int] = """train"""
_SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path}
_SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]:
_SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" )
_SCREAMING_SNAKE_CASE : str = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]}
_SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} )
_SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int:
assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
| 635 | 0 |
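
# The tests above boil down to a simple public-API round trip; a hedged
# standalone sketch (file name and values arbitrary):
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
ds.to_parquet("demo.parquet")                    # exercises the parquet writer
reloaded = Dataset.from_parquet("demo.parquet")  # exercises the parquet reader
assert reloaded.column_names == ["col_1", "col_2"]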
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclid: return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x = r1 (mod n1) and x = r2 (mod n2) for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that a*b = 1 (mod n)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
| 54 |
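
# Worked example for the functions above: find x with x = 1 (mod 5) and
# x = 3 (mod 7); the smallest non-negative solution is 31.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31
assert invert_modulo(5, 7) == 3  # since 5 * 3 = 15 = 1 (mod 7)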
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 115 | 0 |
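
# Hedged sketch of the lazy-import pattern behind `_LazyModule`: names are
# resolved on first attribute access (toy re-implementation, demonstrated
# with a stdlib symbol):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, name_to_module):
        super().__init__(name)
        self._name_to_module = name_to_module

    def __getattr__(self, attr):
        module = importlib.import_module(self._name_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy = _LazyModuleSketch("demo", {"sqrt": "math"})
assert lazy.sqrt(9) == 3.0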
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"

CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Optional[int]:
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , UpperCamelCase__ )
@pytest.fixture
def UpperCAmelCase_ ( __lowerCAmelCase ) -> str:
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , UpperCamelCase__ )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , UpperCamelCase__ )
@pytest.fixture
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Dict:
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , UpperCamelCase__ )
@pytest.fixture
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
HfFolder.save_token(UpperCamelCase__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def UpperCAmelCase_ ( ) -> Optional[int]:
return HfApi(endpoint=UpperCamelCase__ )
@pytest.fixture(scope='''session''' )
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Union[str, Any]:
__lowercase : Union[str, Any] = HfFolder.get_token()
HfFolder.save_token(UpperCamelCase__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCamelCase__ )
@pytest.fixture
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Tuple:
def _cleanup_repo(__lowerCAmelCase ):
hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def UpperCAmelCase_ ( __lowerCAmelCase ) -> int:
@contextmanager
def _temporary_repo(__lowerCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(UpperCamelCase__ )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
__lowercase : Any = F'repo_txt_data-{int(time.time() * 10E3 )}'
__lowercase : Any = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type='''dataset''' , private=UpperCamelCase__ )
hf_api.upload_file(
token=UpperCamelCase__ , path_or_fileobj=str(UpperCamelCase__ ) , path_in_repo='''data/text_data.txt''' , repo_id=UpperCamelCase__ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
__lowercase : Union[str, Any] = F'repo_zipped_txt_data-{int(time.time() * 10E3 )}'
__lowercase : Dict = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type='''dataset''' , private=UpperCamelCase__ )
hf_api.upload_file(
token=UpperCamelCase__ , path_or_fileobj=str(UpperCamelCase__ ) , path_in_repo='''data.zip''' , repo_id=UpperCamelCase__ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
__lowercase : Any = F'repo_zipped_img_data-{int(time.time() * 10E3 )}'
__lowercase : List[Any] = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type='''dataset''' , private=UpperCamelCase__ )
hf_api.upload_file(
token=UpperCamelCase__ , path_or_fileobj=str(UpperCamelCase__ ) , path_in_repo='''data.zip''' , repo_id=UpperCamelCase__ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
return hf_private_dataset_repo_zipped_img_data_
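# Hypothetical usage sketch (not in the original file; the repo name is made up):
# `temporary_repo` guarantees the repo is deleted even if the test body raises.
def _example_temporary_repo_usage(temporary_repo, hf_api, hf_token):
    with temporary_repo(f"{CI_HUB_USER}/my-test-repo") as repo_id:
        hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
        # ... exercise the code under test; cleanup happens on exit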
| 703 |
def multiplicative_persistence(num: int) -> int:
    """
    Return how many times the digits of ``num`` must be multiplied together
    before reaching a single digit.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return how many times the digits of ``num`` must be summed together
    before reaching a single digit.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
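# Worked example (added, matches the doctests above): 217 -> 2*1*7 = 14 -> 1*4 = 4, so
# multiplicative persistence is 2; 199 -> 19 -> 10 -> 1, so additive persistence is 3.
assert multiplicative_persistence(217) == 2
assert additive_persistence(199) == 3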
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    """Configuration for the I-BERT (integer-only BERT) model."""

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
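# Minimal usage sketch (illustrative, not from the original module):
if __name__ == "__main__":
    cfg = IBertConfig(quant_mode=True)
    print(cfg.model_type, cfg.quant_mode)  # -> ibert True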
| 55 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    """Return one (image, segmentation map) pair from the ADE20k test fixtures."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    """Return two (image, segmentation map) pairs from the ADE20k test fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors and all-zero segmentation maps of matching size
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # With do_reduce_labels, background 0 is remapped to 255 and classes shift down by one
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 55 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # override decoder params when reloading
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)
        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")

        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both the decoder from the hub and the local files in the cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
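        # Note (added): the offset-to-seconds conversion above works because CTC emits
        # one logit frame per `inputs_to_logits_ratio` input samples, so a frame index
        # times inputs_to_logits_ratio / sampling_rate is that frame's time in seconds.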
| 442 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 442 | 1 |
from sklearn.metrics import matthews_corrcoef
import datasets
a_ = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
a_ = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
a_ = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
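# Tiny illustrative cross-check (added, not part of the metric): for binary labels, MCC
# reduces to (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)). Here TP=1, TN=2,
# FP=0, FN=1, so MCC = 2 / sqrt(12).
if __name__ == "__main__":
    import math

    assert math.isclose(matthews_corrcoef([0, 1, 1, 0], [0, 1, 0, 0]), 2 / math.sqrt(12))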
| 175 |
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """
    Take a positive or negative integer value and return its hexadecimal string.

    >>> decimal_to_hexadecimal(255)
    '0xff'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
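# Cross-check against Python's built-in hex() (added, illustrative sanity check):
assert decimal_to_hexadecimal(5974) == hex(5974)  # both give '0x1756'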
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 721 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
__UpperCAmelCase ="""
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
__UpperCAmelCase ="""
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
__UpperCAmelCase ="""\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __a ( A , A , A , A , A = None , A = False , ) -> Optional[int]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
A__ = new_id
# turn into Numpy arrays
A__ = np.array(A )
A__ = np.array(A )
if reduce_labels:
A__ = 255
A__ = label - 1
A__ = 255
A__ = label != ignore_index
A__ = np.not_equal(A , A )
A__ = pred_label[mask]
A__ = np.array(A )[mask]
A__ = pred_label[pred_label == label]
A__ = np.histogram(A , bins=A , range=(0, num_labels - 1) )[0]
A__ = np.histogram(A , bins=A , range=(0, num_labels - 1) )[0]
A__ = np.histogram(A , bins=A , range=(0, num_labels - 1) )[0]
A__ = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate intersection and union areas over a list of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean accuracy and overall accuracy over a list of segmentation maps."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
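# A minimal sanity check (illustrative comment only, not part of the original
# metric). With pred = [[0, 1], [1, 1]] and label = [[0, 1], [0, 1]] and
# num_labels=2, the histograms above give area_intersect=[1, 2],
# area_pred_label=[1, 3], area_label=[2, 2], hence area_union=[2, 3] and
# per_category_iou=[0.5, 0.667]. mean_iou([pred], [label], num_labels=2,
# ignore_index=255) then returns mean_iou ~= 0.583, mean_accuracy = 0.75 and
# overall_accuracy = 3/4 = 0.75.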
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result | 261 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a [`DetrConfig`] from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
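# Usage sketch (illustrative comment, not part of the original file): with the
# renamed attributes above, the config is usable and round-trippable.
#
#   config = DetrConfig(num_queries=50, d_model=128)
#   assert config.hidden_size == 128               # alias via the property/attribute_map
#   assert config.to_dict()["model_type"] == "detr"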
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12 | 287 |
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
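# Worked example (illustrative comment, not part of the original file): the
# mass-action law n * p = n_i**2 fixes the missing concentration. For
# n = 1e17 and n_i = 1.5e10 (silicon-like values, in cm^-3):
#   carrier_concentration(electron_conc=1e17, hole_conc=0, intrinsic_conc=1.5e10)
#   -> ("hole_conc", 2250.0)   # (1.5e10)**2 / 1e17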
if __name__ == "__main__":
import doctest
doctest.testmod() | 287 | 1 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('T')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 721 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
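    # Worked example (illustrative comment, not part of the original file): with
    # train_batch_size=32, accumulate_grad_batches=2, gpus=1 and a dataset of
    # 6400 examples, effective_batch_size = 32 * 2 * 1 = 64, so 3 epochs give
    # (6400 / 64) * 3 = 300 scheduler steps.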
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators were removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
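# Usage sketch (illustrative comment; `MyTaskModule` is a hypothetical subclass
# of BaseTransformer implementing `get_dataloader` and the Lightning step
# methods, not defined in this file):
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = MyTaskModule.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTaskModule(args), args)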
| 47 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
    _import_structure['image_processing_vit'] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit'] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit'] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vit'] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 357 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act='silu',
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileViT does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileViT does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)
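            # Worked example (illustrative comment, not in the original test): with
            # image_size=32 the five hidden states have spatial sizes 16, 8, 4, 2 and 1
            # (divisors 2, 4, 8, 16, 32); the loop leaves divisor at 64, and
            # divisor // 2 == 32 matches the tester's output_stride.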
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 394 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
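# Note (illustrative comment, not in the original file): `flatten_dict` turns
# the nested Flax parameter tree into a flat mapping keyed by tuples, e.g.
# {('bert', 'embeddings', 'word_embeddings', 'embedding'): array(...)}, so the
# two models can be compared leaf by leaf above.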
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 710 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr
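# Trace (illustrative comment, not in the original file): for [3, 2, 1]
#   pass 0 (even indices): compare (0, 1) -> [2, 3, 1]
#   pass 1 (odd indices):  compare (1, 2) -> [2, 1, 3]
#   pass 2 (even indices): compare (0, 1) -> [1, 2, 3]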
if __name__ == "__main__":
a_ = list(range(1_0, 0, -1))
print(F"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 523 | 0 |
def solution(min_total: int = 10**12) -> int:
    """
    Project Euler problem 100: returns the number of blue discs in the first
    arrangement with P(two blue) = 1/2 whose total number of discs exceeds
    ``min_total``.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
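# Worked values (illustrative comment, not in the original file): the loop walks
# Pell-style recurrences whose convergents encode the exact arrangements
# (blue, total) = (15, 21), (85, 120), (493, 697), ...; e.g. 15/21 * 14/20 = 1/2.
# solution(22) returns 85, the first arrangement with more than 22 discs in total.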
if __name__ == "__main__":
print(f'{solution() = }')
| 10 |
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Solve an ODE dy/dx = ode_func(x, y) with Heun's method (the explicit trapezoidal rule)."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
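# Usage sketch (illustrative comment, not in the original file): solving
# dy/dx = y with y(0) = 1 and step 0.25 up to x = 1 multiplies y by
# (1 + h + h**2 / 2) = 1.28125 per step, so
#   euler_modified(lambda x, y: y, 1.0, 0.0, 0.25, 1.0)[-1]
# gives ~2.695, close to e ~= 2.71828 (Heun has global error O(h**2)).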
if __name__ == "__main__":
import doctest
doctest.testmod() | 106 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 706 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 670 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature extractor configuration from a pretrained model repository or local folder."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    r"""
    This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of
    the library when created with the [`AutoFeatureExtractor.from_pretrained`] class method.

    This class cannot be instantiated directly using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """
        Register a new feature extractor for this class.
        """
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
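# Usage sketch (illustrative comment, not part of the original file):
#   from transformers import AutoFeatureExtractor
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")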
| 61 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks that the rotor selection and positions are valid and builds the plugboard dict."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f'Please use 3 unique rotors (not {unique_rotsel})')

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f'First rotor position is not within range of 1..26 ({rotorpos1})')
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f'Second rotor position is not within range of 1..26 ({rotorpos2})')
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f'Third rotor position is not within range of 1..26 ({rotorpos3})')

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Builds the plugboard mapping from a string of unique symbol pairs."""
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f'Odd number of symbols ({len(pbstring)})')
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(' ', '')

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f'Duplicate symbol ({i})')
        else:
            tmppbl.add(i)
    del tmppbl

    # Creates the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotora, rotorb, rotorc),
    plugb: str = "",
) -> str:
    """Runs text through the machine; a second pass with the same settings decrypts it."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())
    rotorposa, rotorposb, rotorposc = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorposa -= 1
    rotorposb -= 1
    rotorposc -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorposa
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorposb
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorposc
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors --------------------------
            symbol = abc[rotor_c.index(symbol) - rotorposc]
            symbol = abc[rotor_b.index(symbol) - rotorposb]
            symbol = abc[rotor_a.index(symbol) - rotorposa]
            # 2nd plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorposa += 1
            if rotorposa >= len(abc):
                rotorposa = 0
                rotorposb += 1
            if rotorposb >= len(abc):
                rotorposb = 0
                rotorposc += 1
            if rotorposc >= len(abc):
                rotorposc = 0
        # non-alphabet symbols pass through unchanged; a ValueError
        # could also be raised here instead
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    # any three distinct rotors defined above will do here
    rotor_sel = (rotorb, rotord, rotorh)
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print('Encrypted message:', en)
    print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 557 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Loads a metaseq/fairseq OPT checkpoint and remaps its keys for Transformers."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        # the weights live under the "model" key; no need to re-read the file
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
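# A minimal sketch (not from the original script) of the fused-QKV split above:
# a fused projection with 3 * d rows is cut into three equal chunks along dim 0,
# and metaseq stores them in K, V, Q order.
def _demo_qkv_split() -> None:
    fused = torch.arange(6).reshape(6, 1)  # pretend d = 2, so 3 * d = 6 rows
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert k.tolist() == [[0], [1]] and v.tolist() == [[2], [3]] and q.tolist() == [[4], [5]]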
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Save the converted model
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 690 |
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    key_no_dups = ''
    for ch in key:
        # keep spaces, and keep each alphabetic character only once
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # invert the map so ciphertext symbols point back at plaintext symbols
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input('Enter message to encode or decode: ').strip()
    key = input('Enter keyword: ').strip()
    option = input('Encipher or decipher? E/D:').strip()[0].lower()
    try:
        func = {'e': encipher, 'd': decipher}[option]
    except KeyError:
        raise KeyError('invalid input option')
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 690 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
with open(__lowerCAmelCase ) as metadata_file:
snake_case__ : Tuple = json.load(__lowerCAmelCase )
snake_case__ : Union[str, Any] = LukeConfig(use_entity_aware_attention=__lowerCAmelCase , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
snake_case__ : Any = torch.load(__lowerCAmelCase , map_location='''cpu''' )['''module''']
# Load the entity vocab file
snake_case__ : int = load_original_entity_vocab(__lowerCAmelCase )
# add an entry for [MASK2]
snake_case__ : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
snake_case__ : Tuple = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case__ : Optional[Any] = AddedToken('''<ent>''' , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )
snake_case__ : int = AddedToken('''<ent2>''' , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , '''r''' ) as f:
snake_case__ : Any = json.load(__lowerCAmelCase )
snake_case__ : Optional[int] = '''MLukeTokenizer'''
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ : List[str] = MLukeTokenizer.from_pretrained(__lowerCAmelCase )
# Initialize the embeddings of the special tokens
snake_case__ : Tuple = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
snake_case__ : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
snake_case__ : Optional[Any] = state_dict['''embeddings.word_embeddings.weight''']
snake_case__ : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
snake_case__ : List[Any] = word_emb[enta_init_index].unsqueeze(0 )
snake_case__ : Optional[int] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
snake_case__ : List[Any] = state_dict[bias_name]
snake_case__ : Any = decoder_bias[ent_init_index].unsqueeze(0 )
snake_case__ : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
snake_case__ : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case__ : Dict = F"""encoder.layer.{layer_index}.attention.self."""
snake_case__ : Tuple = state_dict[prefix + matrix_name]
snake_case__ : Union[str, Any] = state_dict[prefix + matrix_name]
snake_case__ : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case__ : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
snake_case__ : int = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
snake_case__ : Tuple = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case__ : List[Any] = state_dict['''entity_predictions.bias''']
snake_case__ : Union[str, Any] = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
snake_case__ : Optional[int] = torch.cat([entity_prediction_bias, entity_mask_bias] )
snake_case__ : Optional[int] = LukeForMaskedLM(config=__lowerCAmelCase ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
snake_case__ : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
snake_case__ : Dict = state_dict[key]
else:
snake_case__ : Union[str, Any] = state_dict[key]
snake_case__ , snake_case__ : Dict = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
if set(__lowerCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__lowerCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
snake_case__ : Optional[Any] = MLukeTokenizer.from_pretrained(__lowerCAmelCase , task='''entity_classification''' )
snake_case__ : Dict = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
snake_case__ : Dict = (0, 9)
snake_case__ : Dict = tokenizer(__lowerCAmelCase , entity_spans=[span] , return_tensors='''pt''' )
snake_case__ : List[str] = model(**__lowerCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case__ : Any = torch.Size((1, 33, 7_68) )
snake_case__ : Tuple = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case__ : int = torch.Size((1, 1, 7_68) )
snake_case__ : Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
snake_case__ : List[str] = MLukeTokenizer.from_pretrained(__lowerCAmelCase )
snake_case__ : Optional[int] = '''Tokyo is the capital of <mask>.'''
snake_case__ : Optional[Any] = (24, 30)
snake_case__ : List[str] = tokenizer(__lowerCAmelCase , entity_spans=[span] , return_tensors='''pt''' )
snake_case__ : int = model(**__lowerCAmelCase )
snake_case__ : Any = encoding['''input_ids'''][0].tolist()
snake_case__ : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
snake_case__ : str = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__lowerCAmelCase )
snake_case__ : List[Any] = outputs.entity_logits[0][0].argmax().item()
snake_case__ : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__lowerCAmelCase ) )
model.save_pretrained(__lowerCAmelCase )
def _a ( __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
snake_case__ : List[Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
snake_case__ : str = [json.loads(__lowerCAmelCase ) for line in open(__lowerCAmelCase )]
snake_case__ : Optional[Any] = {}
for entry in data:
snake_case__ : Dict = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
snake_case__ : List[str] = entity_id
break
snake_case__ : Optional[Any] = F"""{language}:{entity_name}"""
snake_case__ : Tuple = entity_id
return new_mapping
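# A minimal sketch (illustration only) of the embedding-extension trick used
# in the conversion above: rows for new special tokens are seeded from
# existing rows and concatenated onto the embedding matrix.
def _demo_extend_embedding_matrix() -> None:
    word_emb = torch.randn(4, 8)         # pretend vocab of 4, hidden size 8
    ent_emb = word_emb[1].unsqueeze(0)   # seed "<ent>" from an existing row
    enta_emb = word_emb[2].unsqueeze(0)  # seed "<ent2>" from another row
    extended = torch.cat([word_emb, ent_emb, enta_emb])
    assert extended.shape == (6, 8)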
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 347 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=DummyObject ):
    """Placeholder that raises when 'transformers', 'torch' or 'note_seq' are missing."""
    _backends = ["transformers", "torch", "note_seq"]
def __init__( self : Dict , *snake_case_ : Any , **snake_case_ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __magic_name__ ( cls : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __magic_name__ ( cls : List[Any] , *snake_case_ : Any , **snake_case_ : int ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
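# A stripped-down sketch of the DummyObject idea (illustrative only, not the
# diffusers implementation): a metaclass intercepts attribute access on the
# placeholder class, so touching it without the listed backends installed
# raises immediately.
class _DemoDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")


class _DemoPipeline(metaclass=_DemoDummyMeta):
    _backends = ["transformers", "torch", "note_seq"]
    # _DemoPipeline.from_pretrained -> ImportError listing the backends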
| 347 | 1 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
"""simple docstring"""
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 93 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_dpmpp_2m')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type='np',
            use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 93 | 1 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
UpperCamelCase_ = ""
UpperCamelCase_ = ""
UpperCamelCase_ = ""
UpperCamelCase_ = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'created_at', 'text'])
        writer.writerows(outtweets)
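# An illustrative sketch (not part of the original script) of the max_id
# pagination pattern used above: every follow-up request resumes strictly
# below the smallest id fetched so far, so pages never overlap.
def _demo_max_id_pagination() -> list:
    ids = list(range(10, 0, -1))  # pretend tweet ids, newest first

    def fake_user_timeline(count, max_id=None):
        eligible = [i for i in ids if max_id is None or i <= max_id]
        return eligible[:count]

    collected = fake_user_timeline(count=3)
    while True:
        page = fake_user_timeline(count=3, max_id=collected[-1] - 1)
        if not page:
            break
        collected.extend(page)
    return collected  # [10, 9, 8, ..., 1]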
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 28 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Maps each value to the open interval (0, 1) via 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))
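# A numerically stable variant (added for illustration): for large negative
# inputs, exp(-x) overflows in float64; splitting on the sign avoids that.
def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    positive = vector >= 0
    result = np.empty_like(vector, dtype=np.float64)
    result[positive] = 1 / (1 + np.exp(-vector[positive]))
    exp_x = np.exp(vector[~positive])
    result[~positive] = exp_x / (1 + exp_x)
    return result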
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 256 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _UpperCAmelCase ( UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__lowerCAmelCase = flax_key_tuple[:-1] + ("weight",)
__lowerCAmelCase = torch.permute(UpperCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCamelCase ):
# linear layer
__lowerCAmelCase = flax_key_tuple[:-1] + ("weight",)
__lowerCAmelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__lowerCAmelCase = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def _UpperCAmelCase ( UpperCamelCase: Tuple , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
if "metadata" in layer:
__lowerCAmelCase = layer.split("metadata" )
__lowerCAmelCase = "".join(split_layer[0] )[:-1]
__lowerCAmelCase = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
__lowerCAmelCase = layer.split("kvstore" )
__lowerCAmelCase = "".join(split_layer[0] )[:-1]
__lowerCAmelCase = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
__lowerCAmelCase = layer.split("/" )
__lowerCAmelCase = "/".join(split_layer[:-1] )
__lowerCAmelCase = (split_layer[-1],)
if "kvstore/path" in layer:
__lowerCAmelCase = F"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
__lowerCAmelCase = "file"
else:
__lowerCAmelCase = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _UpperCAmelCase ( UpperCamelCase: Tuple , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
__lowerCAmelCase = rename_keys(UpperCamelCase )
__lowerCAmelCase = {}
for k, v in current_block.items():
__lowerCAmelCase = v
__lowerCAmelCase = new_current_block
torch.save(UpperCamelCase , UpperCamelCase )
def _UpperCAmelCase ( UpperCamelCase: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: Any , UpperCamelCase: str = WEIGHTS_NAME ):
"""simple docstring"""
__lowerCAmelCase = convert_file_size_to_int(UpperCamelCase )
__lowerCAmelCase = []
__lowerCAmelCase = {}
__lowerCAmelCase = 0
__lowerCAmelCase = 0
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
__lowerCAmelCase = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
__lowerCAmelCase = flatten_dict(UpperCamelCase , sep="/" )
__lowerCAmelCase = {}
for layer in checkpoint_info.keys():
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_key_and_tensorstore_dict(
UpperCamelCase , UpperCamelCase , UpperCamelCase )
if curr_real_layer_name in all_layers:
__lowerCAmelCase = content
else:
__lowerCAmelCase = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__lowerCAmelCase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__lowerCAmelCase = torch.tensor(UpperCamelCase )
__lowerCAmelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__lowerCAmelCase , __lowerCAmelCase = rename_base_flax_keys(tuple(key.split("/" ) ) , UpperCamelCase )
__lowerCAmelCase = "/".join(UpperCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__lowerCAmelCase = os.path.join(
UpperCamelCase , weights_name.replace(".bin" , F"-{len(UpperCamelCase )+1:05d}-of-???.bin" ) )
rename_and_save_block(UpperCamelCase , UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
__lowerCAmelCase = {}
__lowerCAmelCase = 0
__lowerCAmelCase = raw_weights.to(getattr(UpperCamelCase , UpperCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__lowerCAmelCase = os.path.join(UpperCamelCase , weights_name.replace(".bin" , F"-{len(UpperCamelCase )+1:05d}-of-???.bin" ) )
rename_and_save_block(UpperCamelCase , UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(UpperCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__lowerCAmelCase = {}
__lowerCAmelCase = {}
for idx, shard in enumerate(UpperCamelCase ):
__lowerCAmelCase = weights_name.replace(
".bin" , F"-{idx+1:05d}-of-{len(UpperCamelCase ):05d}.bin" ) # len(sharded_state_dicts):05d}
__lowerCAmelCase = os.path.join(UpperCamelCase , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) )
os.rename(UpperCamelCase , os.path.join(UpperCamelCase , UpperCamelCase ) )
__lowerCAmelCase = shard
for key in shard:
__lowerCAmelCase = shard_file
# Add the metadata
__lowerCAmelCase = {"total_size": total_size}
__lowerCAmelCase = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , "w" , encoding="utf-8" ) as f:
__lowerCAmelCase = json.dumps(UpperCamelCase , indent=2 , sort_keys=UpperCamelCase ) + "\n"
f.write(UpperCamelCase )
return metadata, index
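# A toy sketch (illustration only) of the greedy size-based sharding above:
# weights are appended to the current shard until adding one more would exceed
# the limit, at which point the shard is flushed and a new one is started.
def _demo_greedy_sharding() -> None:
    sizes = {'a': 4, 'b': 3, 'c': 5, 'd': 2}  # pretend per-tensor byte sizes
    max_shard_size = 8
    shards, current, current_size = [], {}, 0
    for name, size in sizes.items():
        if current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = {}, 0
        current[name] = size
        current_size += size
    shards.append(current)
    assert shards == [{'a': 4, 'b': 3}, {'c': 5, 'd': 2}]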
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _UpperCAmelCase ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__lowerCAmelCase = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
__lowerCAmelCase = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
__lowerCAmelCase = TaTokenizer.from_pretrained("t5-small" )
__lowerCAmelCase = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
__lowerCAmelCase = tokenizer(UpperCamelCase , return_tensors="pt" ).input_ids
__lowerCAmelCase = model.generate(UpperCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 376 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 376 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
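# A minimal sketch of the lazy-import idea (illustration, not the real
# _LazyModule): if bound as a module-level `__getattr__` (PEP 562), this defers
# importing a submodule until one of its exported names is first accessed.
def _demo_lazy_getattr(name: str):
    import importlib

    for submodule, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")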
| 391 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""
    model_type = "encodec"
def __init__( self: List[str] , UpperCamelCase_: List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , UpperCamelCase_: Optional[Any]=2_4000 , UpperCamelCase_: Dict=1 , UpperCamelCase_: List[Any]=False , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , UpperCamelCase_: Dict=128 , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=1 , UpperCamelCase_: Union[str, Any]=[8, 5, 4, 2] , UpperCamelCase_: List[Any]="weight_norm" , UpperCamelCase_: int=7 , UpperCamelCase_: Optional[int]=7 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Any="reflect" , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Dict=2 , UpperCamelCase_: Any=1.0 , UpperCamelCase_: Any=1024 , UpperCamelCase_: int=None , UpperCamelCase_: Any=True , **UpperCamelCase_: Optional[Any] , ):
UpperCamelCase_ =target_bandwidths
UpperCamelCase_ =sampling_rate
UpperCamelCase_ =audio_channels
UpperCamelCase_ =normalize
UpperCamelCase_ =chunk_length_s
UpperCamelCase_ =overlap
UpperCamelCase_ =hidden_size
UpperCamelCase_ =num_filters
UpperCamelCase_ =num_residual_layers
UpperCamelCase_ =upsampling_ratios
UpperCamelCase_ =norm_type
UpperCamelCase_ =kernel_size
UpperCamelCase_ =last_kernel_size
UpperCamelCase_ =residual_kernel_size
UpperCamelCase_ =dilation_growth_rate
UpperCamelCase_ =use_causal_conv
UpperCamelCase_ =pad_mode
UpperCamelCase_ =compress
UpperCamelCase_ =num_lstm_layers
UpperCamelCase_ =trim_right_ratio
UpperCamelCase_ =codebook_size
UpperCamelCase_ =codebook_dim if codebook_dim is not None else hidden_size
UpperCamelCase_ =use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**UpperCamelCase_ )
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
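# A worked example (added for illustration) of the frame_rate arithmetic in
# the property above: with the default 24 kHz sampling rate and upsampling
# ratios [8, 5, 4, 2], one frame spans 8 * 5 * 4 * 2 = 320 samples.
def _demo_encodec_frame_rate() -> int:
    hop_length = int(np.prod([8, 5, 4, 2]))  # 320 samples per frame
    frame_rate = math.ceil(24_000 / hop_length)  # ceil(24000 / 320) = 75
    assert frame_rate == 75
    return frame_rate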
| 391 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used by the argparse callback to instantiate the command."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)
IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Registers the `convert` subcommand and its arguments on the given parser."""
        train_parser = parser.add_parser(
            'convert',
            help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.',
        )
        train_parser.add_argument('--model_type', type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            '--tf_checkpoint', type=str, required=True, help='TensorFlow checkpoint path or folder.')
        train_parser.add_argument(
            '--pytorch_dump_output', type=str, required=True, help='Path to the PyTorch saved model output.')
        train_parser.add_argument('--config', type=str, default='', help='Configuration file path or folder.')
        train_parser.add_argument(
            '--finetuning_task_name',
            type=str,
            default=None,
            help='Optional fine-tuning task name if the TF model was a finetuned model.',
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger('transformers-cli/converting')
        self._logger.info(f'Loading model {model_type}')
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
A : List[Any] = self._tf_checkpoint
A : List[Any] = ''''''
else:
A : List[str] = self._tf_checkpoint
A : Optional[Any] = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
SCREAMING_SNAKE_CASE , self._config , self._pytorch_dump_output , SCREAMING_SNAKE_CASE )
elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
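# A self-contained sketch (illustrative only) of the registration pattern used
# by register_subcommand: each command attaches a subparser and stores a
# factory in `func`, which the top-level CLI later calls with the parsed args.
def _demo_subcommand_dispatch() -> str:
    demo_parser = ArgumentParser('demo-cli')
    subparsers = demo_parser.add_subparsers()
    greet = subparsers.add_parser('greet')
    greet.add_argument('--name', type=str, default='world')
    greet.set_defaults(func=lambda args: f'hello {args.name}')
    parsed = demo_parser.parse_args(['greet', '--name', 'enigma'])
    return parsed.func(parsed)  # -> 'hello enigma'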
| 343 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 343 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
"""simple docstring"""
def a_ ( self ):
snake_case = tempfile.mkdtemp()
snake_case = 5
# Realm tok
snake_case = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
snake_case = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(__snake_case , exist_ok=__snake_case )
snake_case = os.path.join(__snake_case , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
snake_case = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(__snake_case , exist_ok=__snake_case )
def a_ ( self ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def a_ ( self ):
shutil.rmtree(self.tmpdirname )
def a_ ( self ):
snake_case = RealmConfig(num_block_records=self.num_block_records )
return config
def a_ ( self ):
snake_case = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def a_ ( self ):
snake_case = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=__snake_case , )
return block_records
def a_ ( self ):
snake_case = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def a_ ( self ):
snake_case = self.get_config()
snake_case = self.get_dummy_retriever()
snake_case = retriever.tokenizer
snake_case = np.array([0, 3] , dtype='''long''' )
snake_case = tokenizer(['''Test question'''] ).input_ids
snake_case = tokenizer(
['''the fourth'''] , add_special_tokens=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , ).input_ids
snake_case = config.reader_seq_len
snake_case , snake_case , snake_case , snake_case = retriever(
__snake_case , __snake_case , answer_ids=__snake_case , max_length=__snake_case , return_tensors='''np''' )
self.assertEqual(len(__snake_case ) , 2 )
self.assertEqual(len(__snake_case ) , 2 )
self.assertEqual(len(__snake_case ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def a_ ( self ):
snake_case = self.get_config()
snake_case = self.get_dummy_retriever()
snake_case = retriever.tokenizer
snake_case = np.array([0, 3, 5] , dtype='''long''' )
snake_case = tokenizer(['''Test question'''] ).input_ids
snake_case = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , ).input_ids
snake_case = config.reader_seq_len
snake_case , snake_case , snake_case , snake_case = retriever(
__snake_case , __snake_case , answer_ids=__snake_case , max_length=__snake_case , return_tensors='''np''' )
self.assertEqual([False, True, True] , __snake_case )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __snake_case )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __snake_case )
def a_ ( self ):
snake_case = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
snake_case = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
snake_case = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
snake_case = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
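# A minimal sketch (illustration only) of the patching idiom used in the last
# test above: any attribute can be swapped for a MagicMock inside the `with`
# block, so the test never hits the network.
def _demo_patch_idiom() -> None:
    import urllib.request

    with patch('urllib.request.urlopen') as mock_urlopen:
        mock_urlopen.return_value = 'stubbed response'
        assert urllib.request.urlopen('https://example.com') == 'stubbed response'
    # outside the block the original function is restored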
| 550 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=3_0 , __snake_case=4_0_0 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=1 / 2_5_5 , __snake_case=True , __snake_case=[0.5, 0.5, 0.5] , __snake_case=[0.5, 0.5, 0.5] , __snake_case=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = do_rescale
snake_case = rescale_factor
snake_case = do_normalize
snake_case = image_mean
snake_case = image_std
snake_case = do_pad
def a_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def a_ ( self , __snake_case , __snake_case=False ):
if not batched:
snake_case = image_inputs[0]
if isinstance(__snake_case , Image.Image ):
snake_case , snake_case = image.size
else:
snake_case , snake_case = image.shape[1], image.shape[2]
if w < h:
snake_case = int(self.size['''shortest_edge'''] * h / w )
snake_case = self.size['''shortest_edge''']
elif w > h:
snake_case = self.size['''shortest_edge''']
snake_case = int(self.size['''shortest_edge'''] * w / h )
else:
snake_case = self.size['''shortest_edge''']
snake_case = self.size['''shortest_edge''']
else:
snake_case = []
for image in image_inputs:
snake_case , snake_case = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case = max(__snake_case , key=lambda __snake_case : item[0] )[0]
snake_case = max(__snake_case , key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
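# A worked example (added for illustration) of the shortest-edge rule encoded
# in get_expected_values above: the short side is pinned to
# size['shortest_edge'] and the long side keeps the aspect ratio.
def _demo_shortest_edge_resize() -> None:
    shortest_edge = 18
    h, w = 30, 400                       # a wide input image (w > h)
    out_h = shortest_edge                # short side pinned to 18
    out_w = int(shortest_edge * w / h)   # long side scaled by the same factor
    assert (out_h, out_w) == (18, 240)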
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = DetrImageProcessor if is_vision_available() else None
def a_ ( self ):
snake_case = DetrImageProcessingTester(self )
@property
def a_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self ):
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(__snake_case , '''image_std''' ) )
self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_rescale''' ) )
self.assertTrue(hasattr(__snake_case , '''rescale_factor''' ) )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
self.assertTrue(hasattr(__snake_case , '''do_pad''' ) )
def a_ ( self ):
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __snake_case )
snake_case = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__snake_case )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , __snake_case )
def a_ ( self ):
pass
def a_ ( self ):
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
snake_case = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ ( self ):
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ ( self ):
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a_ ( self ):
# prepare image and target
snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
snake_case = json.loads(f.read() )
snake_case = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
snake_case = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
snake_case = image_processing(images=__snake_case , annotations=__snake_case , return_tensors='''pt''' )
# verify pixel values
snake_case = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , __snake_case )
snake_case = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
snake_case = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __snake_case ) )
# verify boxes
snake_case = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __snake_case )
snake_case = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __snake_case , atol=1E-3 ) )
# verify image_id
snake_case = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __snake_case ) )
# verify is_crowd
snake_case = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __snake_case ) )
# verify class_labels
snake_case = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __snake_case ) )
# verify orig_size
snake_case = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __snake_case ) )
# verify size
snake_case = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __snake_case ) )
@slow
def a_ ( self ):
# prepare image, target and masks_path
snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
snake_case = json.loads(f.read() )
snake_case = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
snake_case = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
snake_case = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
snake_case = image_processing(images=__snake_case , annotations=__snake_case , masks_path=__snake_case , return_tensors='''pt''' )
# verify pixel values
snake_case = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , __snake_case )
snake_case = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
snake_case = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __snake_case ) )
# verify boxes
snake_case = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __snake_case )
snake_case = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __snake_case , atol=1E-3 ) )
# verify image_id
snake_case = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __snake_case ) )
# verify is_crowd
snake_case = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __snake_case ) )
# verify class_labels
snake_case = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __snake_case ) )
# verify masks
snake_case = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __snake_case )
# verify orig_size
snake_case = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __snake_case ) )
# verify size
snake_case = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __snake_case ) )
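# The (800, 1066) shapes asserted above follow DETR's shortest-edge resizing rule.
# A minimal standalone sketch of that rule (assuming the processor defaults of
# shortest_edge=800 and longest_edge=1333; this helper is not part of the original
# test file):
def _expected_detr_size(height, width, shortest_edge=800, longest_edge=1333):
    size = shortest_edge
    # If matching the short side would push the long side past `longest_edge`,
    # match the long side instead.
    if max(height, width) / min(height, width) * shortest_edge > longest_edge:
        size = int(round(longest_edge * min(height, width) / max(height, width)))
    if height <= width:
        return size, int(size * width / height)
    return int(size * height / width), size


assert _expected_detr_size(480, 640) == (800, 1066)  # the 640x480 COCO sample above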
| 550 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
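# Usage note for the lazy-import structure above: nothing heavy is imported until
# an attribute is first accessed on the package. For example (assuming torch and
# sentencepiece are installed):
#
#   from transformers import XLNetTokenizer, XLNetModel   # resolved via _LazyModule
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   model = XLNetModel.from_pretrained("xlnet-base-cased")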
| 720 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : int = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Any = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
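# A minimal sketch of the mechanism behind `_LazyModule` (simplified; the real
# implementation in transformers.utils also handles submodule access and caches
# resolved attributes):
#
#   import importlib, types
#
#   class LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f".{submodule}", self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(f"module {self.__name__} has no attribute {attr}")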
| 138 | 0 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
snake_case : Tuple = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
snake_case : Tuple = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = CamembertTokenizer
__UpperCAmelCase = CamembertTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = True
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__ = CamembertTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : Optional[int] ):
'''simple docstring'''
__magic_name__ = '<pad>'
__magic_name__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def a__ ( self : Optional[int] ):
'''simple docstring'''
__magic_name__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(UpperCamelCase_ ) , 1_0_0_4 )
def a__ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def a__ ( self : List[str] ):
'''simple docstring'''
__magic_name__ = CamembertTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
__magic_name__ = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__magic_name__ = 'I was born in 92000, and this is falsé.'
__magic_name__ = tokenizer.encode(UpperCamelCase_ )
__magic_name__ = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__magic_name__ = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__magic_name__ = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
__magic_name__ = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_rust_tokenizer()
__magic_name__ = 'I was born in 92000, and this is falsé.'
__magic_name__ = tokenizer.tokenize(UpperCamelCase_ )
__magic_name__ = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__magic_name__ = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = self.get_rust_tokenizer()
__magic_name__ = tokenizer.encode(UpperCamelCase_ )
__magic_name__ = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
__magic_name__ = {'input_ids': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__magic_name__ = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=UpperCamelCase_ , )
| 545 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=DummyObject):
"""simple docstring"""
__UpperCAmelCase = ["""flax""", """transformers"""]
def __init__( self : List[Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : Any ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : Optional[int] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : str , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
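# All classes in this file share the guard pattern above. A minimal sketch of what
# `requires_backends` does (simplified; the real helper lives in diffusers.utils
# and raises a fuller installation message; `_backend_available` is hypothetical):
#
#   def requires_backends(obj, backends):
#       name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
#       if not all(_backend_available(b) for b in backends):
#           raise ImportError(f"{name} requires the following backends: {backends}")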
class UpperCamelCase__ ( metaclass=DummyObject):
"""simple docstring"""
__UpperCAmelCase = ["""flax""", """transformers"""]
def __init__( self : List[Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : str ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : Optional[int] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class UpperCamelCase__ ( metaclass=DummyObject):
"""simple docstring"""
__UpperCAmelCase = ["""flax""", """transformers"""]
def __init__( self : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : List[str] ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : str , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : Tuple , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class UpperCamelCase__ ( metaclass=DummyObject):
"""simple docstring"""
__UpperCAmelCase = ["""flax""", """transformers"""]
def __init__( self : Optional[Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : str , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : str , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Union[str, Any] ):
'''simple docstring'''
        requires_backends(cls , ['flax', 'transformers'] )
| 545 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :Union[str, Any] ) -> int:
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCAmelCase__ = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCAmelCase_ ( self :List[str] , **lowerCamelCase :Dict ) -> Dict:
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCAmelCase_ ( self :Optional[int] , **lowerCamelCase :Tuple ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCAmelCase_ ( self :Optional[int] , **lowerCamelCase :Tuple ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCAmelCase_ ( self :int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self :List[str] ) -> List[str]:
UpperCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase__ = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self :Optional[Any] ) -> Optional[int]:
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
UpperCAmelCase__ = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def UpperCAmelCase_ ( self :int ) -> int:
UpperCAmelCase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase__ = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
UpperCAmelCase__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def UpperCAmelCase_ ( self :Tuple ) -> List[str]:
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = image_processor(UpperCAmelCase_ , return_tensors="np" )
UpperCAmelCase__ = processor(images=UpperCAmelCase_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self :Any ) -> List[Any]:
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
UpperCAmelCase__ = "lower newer"
UpperCAmelCase__ = processor(text=UpperCAmelCase_ )
UpperCAmelCase__ = tokenizer(UpperCAmelCase_ , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self :Dict ) -> Optional[Any]:
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
UpperCAmelCase__ = "lower newer"
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def UpperCAmelCase_ ( self :int ) -> int:
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
UpperCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase__ = processor.batch_decode(UpperCAmelCase_ )
UpperCAmelCase__ = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCAmelCase_ ( self :int ) -> Union[str, Any]:
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = AlignProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
UpperCAmelCase__ = "lower newer"
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
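# Usage sketch of the processor pattern exercised above (assuming the public
# "kakaobrain/align-base" checkpoint; any ALIGN checkpoint works the same way):
#
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> dict with input_ids, token_type_ids, attention_mask and pixel_values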
| 702 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_lowerCAmelCase : List[Any] = ["small", "medium", "large"]
_lowerCAmelCase : List[Any] = "lm_head.decoder.weight"
_lowerCAmelCase : Optional[int] = "lm_head.weight"
def lowerCAmelCase ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ = torch.load(_lowerCAmelCase )
UpperCAmelCase__ = d.pop(_lowerCAmelCase )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_lowerCAmelCase : Union[str, Any] = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
_lowerCAmelCase : List[str] = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
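# Example invocation (assuming the raw DialoGPT checkpoints, e.g. small_ft.pkl,
# sit in the directory passed via --dialogpt_path; the script file name below is
# illustrative, use whatever this file is saved as):
#
#   python convert_dialogpt_checkpoint.py --dialogpt_path /path/to/checkpoints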
| 364 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self , unet : UNetaDModel , scheduler : ScoreSdeVeScheduler )-> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 2000 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , )-> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
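# Usage sketch for the pipeline above (assuming the public
# "google/ncsnpp-church-256" score-SDE checkpoint; sampling is slow at the
# default 2000 steps):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]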
| 85 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[str] = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
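# Usage sketch for the exports above (assuming the public
# "microsoft/trocr-base-handwritten" checkpoint):
#
#   from transformers import TrOCRProcessor, VisionEncoderDecoderModel
#   processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
#   model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   generated_ids = model.generate(pixel_values)
#   text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]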
| 85 | 1 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]

__version__ = '3.0.12'


_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout( TimeoutError ):
    def __init__( self , lock_file ):
        self.lock_file = lock_file
        return None

    def __str__( self ):
        temp = F"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy :
    def __init__( self , lock ):
        self.lock = lock
        return None

    def __enter__( self ):
        return self.lock

    def __exit__( self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class BaseFileLock :
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file( self ):
        return self._lock_file

    @property
    def timeout( self ):
        return self._timeout

    @timeout.setter
    def timeout( self , timeout ):
        self._timeout = float(timeout )
        return None

    def _acquire( self ):
        raise NotImplementedError()

    def _release( self ):
        raise NotImplementedError()

    @property
    def is_locked( self ):
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.05 ):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
                        self._acquire()

                if self.is_locked:
                    logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise

        return _Acquire_ReturnProxy(lock=self )
    def release( self , force=False ):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F"Lock {lock_id} released on {lock_filename}" )
        return None

    def __enter__( self ):
        self.acquire()
        return self

    def __exit__( self , exc_type , exc_value , traceback ):
        self.release()
        return None

    def __del__( self ):
        self.release(force=True )
        return None

    def hash_filename_if_too_long( self , path , max_length ):
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , filename )
        else:
            return path
class WindowsFileLock( BaseFileLock ):
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )

    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None

    def _release( self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock( BaseFileLock ):
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )

    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock( BaseFileLock ):
    def _acquire( self ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
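# Usage sketch for the API above (the lock file name is an arbitrary example;
# `Timeout` is raised if the lock cannot be acquired within `timeout` seconds):
#
#   lock = FileLock("downloads.lock", timeout=10)
#   with lock:
#       ...  # critical section, exclusive across processes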
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowerCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(snake_case__ )
lowerCAmelCase : Any = -1
lowerCAmelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(snake_case__ )
lowerCAmelCase : Optional[int] = model.generate(snake_case__ , max_new_tokens=10 , do_sample=snake_case__ )
lowerCAmelCase : str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : Optional[int] = TextStreamer(snake_case__ )
model.generate(snake_case__ , max_new_tokens=10 , do_sample=snake_case__ , streamer=snake_case__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : Any = cs.out[:-1]
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowerCAmelCase : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(snake_case__ )
lowerCAmelCase : Tuple = -1
lowerCAmelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(snake_case__ )
lowerCAmelCase : Optional[Any] = model.generate(snake_case__ , max_new_tokens=10 , do_sample=snake_case__ )
lowerCAmelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase : Optional[int] = TextIteratorStreamer(snake_case__ )
lowerCAmelCase : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowerCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=snake_case__ )
thread.start()
lowerCAmelCase : Optional[int] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(snake_case__ )
lowerCAmelCase : Union[str, Any] = -1
lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(snake_case__ )
lowerCAmelCase : str = model.generate(snake_case__ , max_new_tokens=10 , do_sample=snake_case__ )
lowerCAmelCase : List[Any] = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : Optional[Any] = TextStreamer(snake_case__ , skip_prompt=snake_case__ )
model.generate(snake_case__ , max_new_tokens=10 , do_sample=snake_case__ , streamer=snake_case__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : str = cs.out[:-1]
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(snake_case__ )
lowerCAmelCase : Tuple = -1
lowerCAmelCase : int = torch.ones((1, 5) , device=snake_case__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase : List[Any] = TextStreamer(snake_case__ , skip_special_tokens=snake_case__ )
model.generate(snake_case__ , max_new_tokens=1 , do_sample=snake_case__ , streamer=snake_case__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase : str = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase : List[Any] = tokenizer(snake_case__ , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowerCAmelCase : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(snake_case__ )
lowerCAmelCase : Tuple = -1
lowerCAmelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(snake_case__ )
lowerCAmelCase : Union[str, Any] = TextIteratorStreamer(snake_case__ , timeout=0.001 )
lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowerCAmelCase : Tuple = Thread(target=model.generate , kwargs=snake_case__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(snake_case__ ):
lowerCAmelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
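# Usage sketch of the iterator-streaming pattern the tests above exercise
# (model/tokenizer names are placeholders for any causal LM):
#
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs=dict(input_ids=input_ids, max_new_tokens=20, streamer=streamer)).start()
#   for chunk in streamer:   # yields decoded text as tokens arrive
#       print(chunk, end="", flush=True)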
| 645 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 1_0
lowerCAmelCase : Optional[int] = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
lowerCAmelCase : Dict = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [9_7], "text": ["1976"]}] * 1_0,
"id": list(range(SCREAMING_SNAKE_CASE ) ),
} , features=SCREAMING_SNAKE_CASE , )
return dataset
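# The session-scoped fixtures in this file each materialize a small file or
# archive once per test session and return its path. A minimal sketch of how a
# test consumes one of them (the test name is illustrative):
#
#   def test_load_csv(csv_path):
#       ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#       assert ds.num_rows == 4  # matches the DATA rows defined below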
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE )
return filename
# FILE_CONTENT + files
lowerCAmelCase__ = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt"
lowerCAmelCase : Optional[Any] = FILE_CONTENT
with open(SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE )
return filename
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
import bza
lowerCAmelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
lowerCAmelCase : Optional[int] = bytes(SCREAMING_SNAKE_CASE , "utf-8" )
with bza.open(SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
import gzip
lowerCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
lowerCAmelCase : List[Any] = bytes(SCREAMING_SNAKE_CASE , "utf-8" )
with gzip.open(SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowerCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
lowerCAmelCase : List[str] = bytes(SCREAMING_SNAKE_CASE , "utf-8" )
with lza.frame.open(SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowerCAmelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(SCREAMING_SNAKE_CASE , "w" ) as archive:
archive.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
import tarfile
lowerCAmelCase : int = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(SCREAMING_SNAKE_CASE , "w" ) as f:
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
import lzma
lowerCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
lowerCAmelCase : Dict = bytes(SCREAMING_SNAKE_CASE , "utf-8" )
with lzma.open(SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
import zipfile
lowerCAmelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
lowerCAmelCase : Any = bytes(SCREAMING_SNAKE_CASE , "utf-8" )
with zstd.open(SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.xml"
lowerCAmelCase : Optional[Any] = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE )
return filename
lowerCAmelCase__ = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
lowerCAmelCase__ = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
lowerCAmelCase__ = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase__ = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
lowerCAmelCase__ = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="session" )
def a__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = datasets.Dataset.from_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE ) ) as con:
lowerCAmelCase : Any = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(SCREAMING_SNAKE_CASE , "w" , newline="" ) as f:
lowerCAmelCase : Union[str, Any] = csv.DictWriter(SCREAMING_SNAKE_CASE , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(SCREAMING_SNAKE_CASE , "w" , newline="" ) as f:
lowerCAmelCase : List[Any] = csv.DictWriter(SCREAMING_SNAKE_CASE , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
import bza
lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(SCREAMING_SNAKE_CASE , "rb" ) as f:
lowerCAmelCase : Dict = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join("main_dir" , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join("main_dir" , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
lowerCAmelCase : Union[str, Any] = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(SCREAMING_SNAKE_CASE , "wb" ) as f:
lowerCAmelCase : Optional[int] = pq.ParquetWriter(SCREAMING_SNAKE_CASE , schema=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(SCREAMING_SNAKE_CASE ) )] for k in DATA[0]} , schema=SCREAMING_SNAKE_CASE )
writer.write_table(SCREAMING_SNAKE_CASE )
writer.close()
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
lowerCAmelCase : Optional[Any] = {"data": DATA}
with open(SCREAMING_SNAKE_CASE , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
lowerCAmelCase : Optional[int] = {"data": DATA_DICT_OF_LISTS}
with open(SCREAMING_SNAKE_CASE , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
lowerCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(SCREAMING_SNAKE_CASE , "w" ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(SCREAMING_SNAKE_CASE , "w" ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
return path
@pytest.fixture(scope="session" )
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(SCREAMING_SNAKE_CASE , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
return path
@pytest.fixture(scope="session" )
# pytest fixtures that materialise small on-disk datasets for loader tests.
# NOTE: fixture and parameter names below are reconstructed for readability (the
# originals were machine-obfuscated and not valid Python); DATA_STR and the
# jsonl_path/jsonl2_path fixtures are assumed to be defined earlier in this conftest.
import gzip
import json
import os
import tarfile
import zipfile

import pytest


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)  # copy the raw bytes, gzip-compressed
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def unsupported_ext_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
| 645 | 1 |
"""simple docstring"""
def __lowercase ( a : int = 1_000 ) -> int:
__snake_case : Optional[int] =2**power
__snake_case : Optional[Any] =0
while n:
__snake_case , __snake_case : int =r + n % 10, n // 10
return r
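
# Worked example: solution(15) == 26, since 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.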
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 497 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __lowercase ( a : Any , a : List[Any] ) -> Dict:
__snake_case : Any =checkpoint
__snake_case : Dict ={}
__snake_case : List[Any] =vae_state_dict['''encoder.conv_in.weight''']
__snake_case : List[str] =vae_state_dict['''encoder.conv_in.bias''']
__snake_case : Union[str, Any] =vae_state_dict['''encoder.conv_out.weight''']
__snake_case : Union[str, Any] =vae_state_dict['''encoder.conv_out.bias''']
__snake_case : str =vae_state_dict['''encoder.norm_out.weight''']
__snake_case : str =vae_state_dict['''encoder.norm_out.bias''']
__snake_case : Tuple =vae_state_dict['''decoder.conv_in.weight''']
__snake_case : str =vae_state_dict['''decoder.conv_in.bias''']
__snake_case : List[str] =vae_state_dict['''decoder.conv_out.weight''']
__snake_case : Tuple =vae_state_dict['''decoder.conv_out.bias''']
__snake_case : Union[str, Any] =vae_state_dict['''decoder.norm_out.weight''']
__snake_case : List[str] =vae_state_dict['''decoder.norm_out.bias''']
__snake_case : Tuple =vae_state_dict['''quant_conv.weight''']
__snake_case : List[str] =vae_state_dict['''quant_conv.bias''']
__snake_case : Optional[int] =vae_state_dict['''post_quant_conv.weight''']
__snake_case : Tuple =vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
__snake_case : Union[str, Any] =len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
__snake_case : Union[str, Any] ={
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(a )
}
# Retrieves the keys for the decoder up blocks only
__snake_case : List[Any] =len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
__snake_case : List[Any] ={
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(a )
}
for i in range(a ):
__snake_case : Any =[key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
__snake_case : Optional[Any] =vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
__snake_case : Optional[int] =vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
__snake_case : Optional[Any] =renew_vae_resnet_paths(a )
__snake_case : Union[str, Any] ={'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
__snake_case : Tuple =[key for key in vae_state_dict if '''encoder.mid.block''' in key]
__snake_case : Any =2
for i in range(1 , num_mid_res_blocks + 1 ):
__snake_case : Tuple =[key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
__snake_case : List[str] =renew_vae_resnet_paths(a )
__snake_case : List[str] ={'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
__snake_case : int =[key for key in vae_state_dict if '''encoder.mid.attn''' in key]
__snake_case : List[Any] =renew_vae_attention_paths(a )
__snake_case : Union[str, Any] ={'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
for i in range(a ):
__snake_case : List[Any] =num_up_blocks - 1 - i
__snake_case : Dict =[
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
__snake_case : int =vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
__snake_case : Optional[Any] =vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
__snake_case : List[str] =renew_vae_resnet_paths(a )
__snake_case : int ={'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
__snake_case : Any =[key for key in vae_state_dict if '''decoder.mid.block''' in key]
__snake_case : Dict =2
for i in range(1 , num_mid_res_blocks + 1 ):
__snake_case : Dict =[key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
__snake_case : Tuple =renew_vae_resnet_paths(a )
__snake_case : Tuple ={'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
__snake_case : Optional[Any] =[key for key in vae_state_dict if '''decoder.mid.attn''' in key]
__snake_case : Dict =renew_vae_attention_paths(a )
__snake_case : List[str] ={'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
return new_checkpoint
def __lowercase ( a : str , a : str , ) -> Optional[int]:
# Only support V1
__snake_case : Union[str, Any] =requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
__snake_case : Union[str, Any] =io.BytesIO(r.content )
__snake_case : Any =OmegaConf.load(a )
__snake_case : Union[str, Any] =512
__snake_case : List[str] ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
__snake_case : Any ={}
with safe_open(a , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
__snake_case : Optional[int] =f.get_tensor(a )
else:
__snake_case : Optional[int] =torch.load(a , map_location=a )['''state_dict''']
# Convert the VAE model.
__snake_case : Dict =create_vae_diffusers_config(a , image_size=a )
__snake_case : Union[str, Any] =custom_convert_ldm_vae_checkpoint(a , a )
__snake_case : str =AutoencoderKL(**a )
vae.load_state_dict(a )
vae.save_pretrained(a )
if __name__ == "__main__":
UpperCamelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
UpperCamelCase_ : Optional[Any] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
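
# Example invocation (script name and paths are hypothetical):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./converted_vae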
| 497 | 1 |
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits followed by a check letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
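
# Worked example: is_spain_national_id("12345678Z") is True,
# because 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".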
| 215 |
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


# NOTE: class and method names are reconstructed; the concrete test class is
# expected to provide `feature_extraction_class` and `feat_extract_dict`.
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 478 | 0 |
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


# NOTE: class, attribute and test-method names are reconstructed from the standard
# transformers TF test conventions; the originals were machine-obfuscated.
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 706 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively look for `key` from both ends of `list_data`; return its index or -1."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
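
# Worked examples: search([1, 2, 3, 4, 5], 4) == 3 and search([1, 2, 3], 7) == -1.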
| 456 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


# NOTE: the class name is reconstructed from the attribute/processor combination
# below; the original identifier was machine-obfuscated.
class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 164 |
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
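
# With this builder registered, load_dataset("audiofolder", data_dir=...) exposes each
# matching file as an "audio" feature and, when the directory layout encodes classes,
# an inferred "label" column.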
| 164 | 1 |
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    """Recursively sort the first `n` elements of `collection` in place."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Bubble collection[index - 1] rightwards until the adjacent pair is ordered."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
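
# Worked example: rec_insertion_sort([3, 1, 2], 3) sorts the list in place to [1, 2, 3].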
| 46 |
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 46 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


# NOTE: the dataclass field names and boolean/None defaults below were lost in the
# source; they are reconstructed from the `self.args.<name>` references further down.
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
| 101 |
INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 303 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
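
# The _LazyModule indirection defers the optional torch/vision imports until one of the
# exported names (e.g. GLPNModel) is actually accessed, keeping the package import cheap.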
| 700 |
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for the two input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
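
# For inputs (1, 1), XOR == 0 and AND == 1: classical bit 0 holds the XOR and bit 1 the
# AND, so all 1000 shots should report the bit string "10".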
| 471 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# NOTE: attribute and parameter names (load_in_8bit, llm_int8_threshold, ...) are
# reconstructed from the bitsandbytes quantization conventions; the original
# identifiers were machine-obfuscated and collided with each other.
@dataclass
class BitsAndBytesConfig:
    """
    Wrapper class for the attributes and features one can tune when loading a model via `bitsandbytes`.
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments have the expected types."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
 | 39 |
"""simple docstring"""
def UpperCamelCase ( _A , _A ) -> None:
lowercase : List[Any] = len(_A )
print("""The following activities are selected:""" )
# The first activity is always selected
lowercase : Optional[int] = 0
print(_A , end=""",""" )
# Consider rest of the activities
for j in range(_A ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(_A , end=""",""" )
lowercase : str = j
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = [1, 3, 0, 5, 8, 5]
_lowerCAmelCase = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
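
# For the sample data above the printed selection is "0,1,3,4," (activities 0, 1, 3 and 4).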
| 264 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


# NOTE: class and method names are reconstructed; the original identifiers were
# machine-obfuscated.
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
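
# Minimal usage sketch (shapes illustrative): images in [-1, 1] of shape (batch, 3, H, W)
# with W >= 256 come back watermarked in the same value range; smaller images pass through.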
| 646 |
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


# NOTE: class, attribute and test-method names are reconstructed from the standard
# transformers tokenizer-test conventions; the originals were machine-obfuscated.
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 646 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0])
_SCREAMING_SNAKE_CASE : int = get_activation("""gelu""")
self.assertTrue(torch.allclose(gelu_python(_A) , torch_builtin(_A)))
self.assertFalse(torch.allclose(gelu_python(_A) , gelu_new(_A)))
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0])
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_activation("""gelu""")
_SCREAMING_SNAKE_CASE : List[str] = get_activation("""gelu_10""")
_SCREAMING_SNAKE_CASE : Tuple = torch_builtin(_A)
_SCREAMING_SNAKE_CASE : List[Any] = geluaa(_A)
_SCREAMING_SNAKE_CASE : Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0)
self.assertTrue(torch.max(_A).item() == 10.0)
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask))
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
get_activation("""gelu""")
get_activation("""gelu_10""")
get_activation("""gelu_fast""")
get_activation("""gelu_new""")
get_activation("""gelu_python""")
get_activation("""gelu_pytorch_tanh""")
get_activation("""linear""")
get_activation("""mish""")
get_activation("""quick_gelu""")
get_activation("""relu""")
get_activation("""sigmoid""")
get_activation("""silu""")
get_activation("""swish""")
get_activation("""tanh""")
with self.assertRaises(_A):
get_activation("""bogus""")
with self.assertRaises(_A):
get_activation(_A)
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_activation("""gelu""")
_SCREAMING_SNAKE_CASE : Optional[int] = 1
_SCREAMING_SNAKE_CASE : List[Any] = get_activation("""gelu""")
self.assertEqual(acta.a , 1)
with self.assertRaises(_A):
_SCREAMING_SNAKE_CASE : Any = acta.a
 | 338 |
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


# NOTE: the `compressed_sd[...]` targets below were lost in the source and are
# reconstructed from the student/teacher layer mapping the loop implements.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
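
# Example invocation (output path hypothetical):
#   python extract.py --model_type roberta --model_name roberta-large --dump_checkpoint ./distil_init.pth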
| 338 | 1 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] =Mock()
__lowerCamelCase : int =conn, Mock()
__lowerCamelCase : int =iter([1, None] )
__lowerCamelCase : Optional[int] =lambda SCREAMING_SNAKE_CASE : next(SCREAMING_SNAKE_CASE )
# ===== invoke =====
send_file(filename='''mytext.txt''' , testing=SCREAMING_SNAKE_CASE )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 363 |
"""simple docstring"""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] =len(SCREAMING_SNAKE_CASE )
__lowerCamelCase : Union[str, Any] =[[0] * n for i in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
__lowerCamelCase : Union[str, Any] =y_points[i]
for i in range(2 , SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCamelCase : List[Any] =(
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
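
# Worked example: neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0,
# since the points lie on the line y = x + 5.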
| 363 | 1 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
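# Hypothetical usage sketch (model name and paths are illustrative, not from this module):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#     train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)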
| 18 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 18 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b  # protobuf comparison with the (cleared) names ignored
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from the model to reduce its size; returns the path of the optimized model.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
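# Hypothetical usage (the path is illustrative):
#
#     optimized_path = remove_dup_initializers("exported/model.onnx")
#     print(os.path.getsize(optimized_path))  # should be smaller than the input model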
| 373 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
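# A small usage sketch (assuming the reconstructed class name; values are illustrative):
#
#     config = PegasusConfig(encoder_layers=2, decoder_layers=2, d_model=512)
#     assert config.hidden_size == config.d_model
#     assert config.num_attention_heads == config.encoder_attention_heads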
| 373 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
def UpperCAmelCase__ ( self : str , _A : str , _A : Optional[int] , _A : Dict , _A : Optional[int] , _A : Dict , _A : List[Any] , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = OpenLlamaModel(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = model(_A , attention_mask=_A )
__SCREAMING_SNAKE_CASE : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : List[str] , _A : Optional[Any] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Dict , _A : str , _A : List[str] , _A : Optional[Any] , _A : int , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : Optional[Any] = OpenLlamaModel(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__SCREAMING_SNAKE_CASE : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
__SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : Dict , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : str , _A : List[str] , _A : int , _A : Union[str, Any] , _A : Tuple , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = OpenLlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : List[str] , _A : List[Any] , _A : Tuple , _A : Tuple , _A : Optional[Any] , _A : List[str] , _A : Dict , _A : List[str] , _A : Tuple , _A : Any , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : Optional[int] = OpenLlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
__SCREAMING_SNAKE_CASE : Optional[Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
__SCREAMING_SNAKE_CASE : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__SCREAMING_SNAKE_CASE : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__SCREAMING_SNAKE_CASE : Any = torch.cat([input_mask, next_mask] , dim=-1 )
__SCREAMING_SNAKE_CASE : List[str] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['''hidden_states'''][0]
__SCREAMING_SNAKE_CASE : str = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['''hidden_states'''][0]
# select random slice
__SCREAMING_SNAKE_CASE : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE : Optional[int] = type
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : List[Any] = 3
__SCREAMING_SNAKE_CASE : Optional[Any] = input_dict['''input_ids''']
__SCREAMING_SNAKE_CASE : Dict = input_ids.ne(1 ).to(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : int = OpenLlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : List[Any] = 3
__SCREAMING_SNAKE_CASE : int = '''single_label_classification'''
__SCREAMING_SNAKE_CASE : List[Any] = input_dict['''input_ids''']
__SCREAMING_SNAKE_CASE : Tuple = input_ids.ne(1 ).to(_A )
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : int = OpenLlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : List[str] = 3
__SCREAMING_SNAKE_CASE : Dict = '''multi_label_classification'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = input_dict['''input_ids''']
__SCREAMING_SNAKE_CASE : Any = input_ids.ne(1 ).to(_A )
__SCREAMING_SNAKE_CASE : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__SCREAMING_SNAKE_CASE : Optional[Any] = OpenLlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCAmelCase__ ( self : int , _A : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : str = ids_tensor([1, 10] , config.vocab_size )
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : Any = OpenLlamaModel(_A )
original_model.to(_A )
original_model.eval()
__SCREAMING_SNAKE_CASE : int = original_model(_A ).last_hidden_state
__SCREAMING_SNAKE_CASE : Dict = original_model(_A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : List[Any] = {'''type''': scaling_type, '''factor''': 10.0}
__SCREAMING_SNAKE_CASE : Any = OpenLlamaModel(_A )
scaled_model.to(_A )
scaled_model.eval()
__SCREAMING_SNAKE_CASE : Any = scaled_model(_A ).last_hidden_state
__SCREAMING_SNAKE_CASE : Optional[Any] = scaled_model(_A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
| 74 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from a single workflow job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 508 | 0 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string of 0s and 1s.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """
    Decompresses given data_bits using the Lempel-Ziv-Welch algorithm and returns the result as a string.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # the lexicon codes grow a bit wider: re-key every entry with a leading "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (should only consist of 0s and 1s) as bytes to the file.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """
    Removes the size prefix that a compressed file carries before the payload bits.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, decompresses it and writes the result to the destination file.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
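# Format note (inferred from the code above, so treat it as an assumption): the
# companion compressor prepends a size prefix that remove_prefix strips before
# decoding, and write_file_binary pads the decoded bit stream to whole bytes with a
# "1" followed by "0"s so the byte boundary can be recovered later.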
| 274 |
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the leftmost insertion point for item while keeping the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the rightmost insertion point for item while keeping the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
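# A short illustration of the insort helpers above (assuming the reconstructed names):
#
#     nums = [0, 5, 7, 10, 15]
#     insort_left(nums, 6)   # nums -> [0, 5, 6, 7, 10, 15]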
| 274 | 1 |
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b] in O(log n) via lazy propagation."""
        if self.flag[idx] is True:
            # push the pending assignment down before descending
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[idx] = val
                self.flag[idx] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
_lowercase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
_lowercase = 15
_lowercase = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
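# Expected output of the driver above (derived from A and the reconstructed class):
# the three range-max queries print 7, 14 and 15, and the query after the first
# range update prints 111, since positions [1, 3] were assigned the value 111.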
| 306 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Brute force: for each element, scan the rest of the list for the next greater one."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but iterates with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution: scan from the right, popping elements that are too small."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 306 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 707 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
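# Hypothetical demo of the classes above: a three-node tree sums to 10 + 5 - 3 = 12.
if __name__ == "__main__":
    demo_tree = Node(10)
    demo_tree.left = Node(5)
    demo_tree.right = Node(-3)
    assert sum(BinaryTreeNodeSum(demo_tree)) == 12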
| 625 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def A ( self : int )-> List[Any]:
super().setUp()
__UpperCamelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__UpperCamelCase = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
__UpperCamelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__UpperCamelCase = {"unk_token": "<unk>"}
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase_ ) )
def A ( self : Union[str, Any] , **A_ : int )-> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def A ( self : Optional[Any] , **A_ : Any )-> int:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def A ( self : Optional[Any] , A_ : Dict )-> List[str]:
return "lower newer", "lower newer"
@cached_property
def A ( self : Dict )-> int:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def A ( self : List[str] )-> Union[str, Any]:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def A ( self : int )-> Tuple:
__UpperCamelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__UpperCamelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def A ( self : Tuple )-> List[Any]:
__UpperCamelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCAmelCase_ )
self.assertIn("attention_mask" , lowerCAmelCase_ )
self.assertNotIn("labels" , lowerCAmelCase_ )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase_ )
@require_torch
def A ( self : List[str] )-> str:
__UpperCamelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def A ( self : Any )-> Union[str, Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def A ( self : Optional[Any] )-> Union[str, Any]:
__UpperCamelCase = ["A long paragraph for summarization."]
__UpperCamelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(lowerCAmelCase_ , return_tensors="pt" )
__UpperCamelCase = tokenizer(text_target=lowerCAmelCase_ , return_tensors="pt" )
__UpperCamelCase = inputs["input_ids"]
__UpperCamelCase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def A ( self : Any )-> Union[str, Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = ["Summary of the text.", "Another summary."]
__UpperCamelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCamelCase = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
__UpperCamelCase = [[0] * len(lowerCAmelCase_ ) for x in encoded_output["input_ids"]]
__UpperCamelCase = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase_ )
def A ( self : Any )-> Dict:
pass
def A ( self : Any )-> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
__UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
__UpperCamelCase = "A, <mask> AllenNLP sentence."
__UpperCamelCase = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
__UpperCamelCase = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
__UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) | 505 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class A :
lowercase_ = field(metadata={'help': 'Which column contains the label'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the training file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} )
lowercase_ = field(
default=128 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase_ = field(
default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
| 22 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase__ = {"input_ids": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
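# Hedged usage sketch for the checkpoint exercised above (downloads the model; network access assumed).
# The slow test above expects encode("Hello World!") == [18536, 2260, 101].
from transformers import BertGenerationTokenizer

demo_tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
print(demo_tokenizer.encode("Hello World!"))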
| 718 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
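# Hedged sketch: driving PhobertTokenizer from local files written in the same format as setUp above
# ("token frequency" lines in vocab.txt, BPE merge rules in merges.txt); the file paths are assumptions.
from transformers import PhobertTokenizer

demo_tokenizer = PhobertTokenizer("vocab.txt", "merges.txt", unk_token="<unk>")
print(demo_tokenizer.tokenize("Tôi là VinAI Research"))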
| 274 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
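# Hedged sketch of the special-token helper above in action (checkpoint name taken from the map
# above; network access assumed).
from transformers import DistilBertTokenizerFast

demo_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
demo_ids = demo_tokenizer.convert_tokens_to_ids(demo_tokenizer.tokenize("hello world"))
# Wraps the ids as [CLS] ... [SEP]; a second segment would be appended with its own trailing [SEP].
print(demo_tokenizer.build_inputs_with_special_tokens(demo_ids))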
| 222 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
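# Hedged sketch of the text_target path exercised above (checkpoint from the tests; network access
# and a torch install assumed).
from transformers import MvpTokenizer

demo_tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
demo_batch = demo_tokenizer(
    ["A long paragraph for summarization."],
    text_target=["Summary of the text."],
    return_tensors="pt",
)
print(demo_batch["input_ids"].shape, demo_batch["labels"].shape)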
| 607 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # show the progress bar only on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
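# Hedged usage sketch matching the signature above: main_process_only is the first positional
# argument, so the iterable comes second; only local rank 0 renders the bar.
# (Newer accelerate releases make main_process_only keyword-only; adjust accordingly.)
from accelerate.utils import tqdm

for _ in tqdm(True, range(1_000)):
    pass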
| 548 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0",
        features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[],
        builder_name="builder", config_name="config", version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
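# Hedged sketch of the public round trip these tests cover (the directory path is an assumption).
from datasets import DatasetInfo

demo_info = DatasetInfo(description="foo", dataset_size=42)
demo_info.write_to_directory("/tmp/dset_info_demo")
demo_reloaded = DatasetInfo.from_directory("/tmp/dset_info_demo")
assert demo_reloaded.dataset_size == 42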
| 548 | 1 |
def solution(power: int = 1000) -> int:
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r

if __name__ == "__main__":
print(solution(int(str(input()).strip())))
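# Equivalent one-liner via the decimal string; same answer (1366 for 2**1000).
def solution_str(power: int = 1000) -> int:
    return sum(int(digit) for digit in str(2**power))


assert solution_str(15) == 26  # 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26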
| 63 |
def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
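# Standard-library cross-check of the same count (weekday() returns 6 for Sunday); prints 171.
from datetime import date

print(sum(date(y, m, 1).weekday() == 6 for y in range(1901, 2001) for m in range(1, 13)))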
| 154 | 0 |
'''simple docstring'''
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
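    # Example behaviour of join() above, including the trailing-separator strip.
    assert join("-", ["a", "b", "c"]) == "a-b-c"
    assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"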
| 43 | '''simple docstring'''
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
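# Round-trip sanity check for the two functions above (the classic BANANA example).
demo_result = bwt_transform("^BANANA")
assert demo_result["bwt_string"] == "BNN^AAA"
assert reverse_bwt(demo_result["bwt_string"], demo_result["idx_original_string"]) == "^BANANA"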
| 43 | 1 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 334 |
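# Quick usage of the resistor helpers defined above.
print(resistor_parallel([3.21389, 2, 3]))  # ~0.8738 ohms
print(resistor_series([3.21389, 2, 3]))    # 8.21389 ohms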
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
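# The same lazy-init idiom reduced to its skeleton; the module and class names here are hypothetical
# placeholders, and the pattern only makes sense inside a package's __init__.py.
import sys

from transformers.utils import _LazyModule

_sketch_import_structure = {"configuration_mymodel": ["MyModelConfig"]}
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _sketch_import_structure, module_spec=__spec__)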
| 325 | 0 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
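# The window-building loops above can also be written with numpy stride tricks (numpy >= 1.20);
# a small sketch with assumed toy sizes:
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

demo_series = np.arange(30, dtype=float)
demo_windows = sliding_window_view(demo_series, window_shape=10)  # shape (21, 10)
demo_x = demo_windows[:-5]      # inputs: rows of look_back=10 consecutive steps
demo_y = demo_windows[5:, -5:]  # targets: the 5 values immediately after each input row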
| 263 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
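    # All three variants agree; for target 5 over [1, 2, 5] there are 9 ordered combinations.
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )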
| 263 | 1 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
| 82 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_A : str = """sshleifer/mar_enro_6_3_student"""
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
lowercase : Tuple = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
lowercase : Tuple = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
lowercase : Union[str, Any] = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
lowercase : Optional[Any] = bash_script.replace(_a , str(_a ) )
lowercase : Any = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowercase : List[Any] = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowercase : Union[str, Any] = ["finetune.py"] + bash_script.split() + args
with patch.object(_a , "argv" , _a ):
lowercase : Dict = argparse.ArgumentParser()
lowercase : Optional[Any] = pl.Trainer.add_argparse_args(_a )
lowercase : List[str] = SummarizationModule.add_model_specific_args(_a , os.getcwd() )
lowercase : Tuple = parser.parse_args()
lowercase : Optional[Any] = main(_a )
# Check metrics
lowercase : int = load_json(model.metrics_save_path )
lowercase : Union[str, Any] = metrics["val"][0]
lowercase : Tuple = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _a )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.0_1 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase : List[str] = os.listdir(_a )
lowercase : Optional[Any] = [x for x in contents if x.endswith(".ckpt" )][0]
lowercase : str = os.path.join(args.output_dir , _a )
lowercase : int = torch.load(_a , map_location="cpu" )
lowercase : int = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
lowercase : List[Any] = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
lowercase : Union[str, Any] = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 128,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
lowercase : Optional[int] = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
lowercase : Tuple = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
lowercase : Optional[int] = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
lowercase : Union[str, Any] = bash_script.replace(_a , str(_a ) )
lowercase : Any = self.get_auto_remove_tmp_dir()
lowercase : str = bash_script.replace("--fp16" , "" )
lowercase : Any = 6
lowercase : Optional[int] = (
["distillation.py"]
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
"--gpus=1",
"--learning_rate=1e-3",
f"""--num_train_epochs={epochs}""",
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(_a , "argv" , _a ):
lowercase : Optional[int] = argparse.ArgumentParser()
lowercase : List[str] = pl.Trainer.add_argparse_args(_a )
lowercase : Any = SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
lowercase : str = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowercase : Dict = distill_main(_a )
# Check metrics
lowercase : Tuple = load_json(model.metrics_save_path )
lowercase : int = metrics["val"][0]
lowercase : Tuple = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase : int = os.listdir(_a )
lowercase : Dict = [x for x in contents if x.endswith(".ckpt" )][0]
lowercase : List[str] = os.path.join(args.output_dir , _a )
lowercase : Any = torch.load(_a , map_location="cpu" )
lowercase : int = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
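# The argv-patching trick these tests rely on, in isolation (the flag name is chosen for illustration).
import argparse
import sys
from unittest.mock import patch

demo_parser = argparse.ArgumentParser()
demo_parser.add_argument("--output_dir")
with patch.object(sys, "argv", ["finetune.py", "--output_dir", "/tmp/out"]):
    demo_args = demo_parser.parse_args()  # parse_args() reads the patched sys.argv
assert demo_args.output_dir == "/tmp/out"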
| 701 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
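    # The helper evaluates v_rms = sqrt(3 * R * T / M) with R = 8.3144598 J/(mol*K).
    # Mind the units: for nitrogen at T = 300 K, M = 0.028 kg/mol gives ~517 m/s;
    # passing 28 as above treats the input as g/mol and shrinks the result by sqrt(1000).
    print(rms_speed_of_molecule(300, 0.028))  # ~516.96 m/s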
| 518 | 0 |