import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from a (C, H, W) tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
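# Shape trace for torch_extract_patches (derived from the code above, not part of
# the original file): a (3, 64, 96) image with 16x16 patches is unfolded into a
# (1, 3*16*16, 24) tensor, reshaped to (1, 3, 16, 16, 24), permuted, and finally
# returned as (1, 64 // 16, 96 // 16, 3 * 16 * 16) = (1, 4, 6, 768).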
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
):
    """Render `text` onto a fresh PIL image."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image, header: str, **kwargs):
    """Render a text header above `image` and return the combined result."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image


class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
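# Sizing sketch for extract_flattened_patches (derived from the code above, not
# part of the original file): for a 64x64 RGB input with 16x16 patches and
# max_patches=2048, scale = sqrt(2048 / 16) ~ 11.3, so the image is resized to
# 720x720 (a 45x45 grid of 2025 <= 2048 patches) and the result is padded to a
# (2048, 2 + 16*16*3) array whose first two columns hold 1-based row/col ids.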
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float list of lists with the given 2D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Compute the circular convolution of two one-dimensional signals."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
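# Worked example (hand-computed, not part of the original file): with the
# default signals [2, 1, 2, -1] and [1, 2, 3, 4],
#   c[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10
#   c[1] = 2*2 + 1*1 + 2*4 + (-1)*3 = 10
#   c[2] = 2*3 + 1*2 + 2*1 + (-1)*4 = 6
#   c[3] = 2*4 + 1*3 + 2*2 + (-1)*1 = 14
# so CircularConvolution().circular_convolution() returns [10.0, 10.0, 6.0, 14.0].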
if __name__ == "__main__":
    doctest.testmod()
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
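    # For reference (spelled out here for clarity, not part of the original file),
    # the special-token layout produced above is:
    #   single sequence:   <s> X </s>
    #   pair of sequences: <s> A </s></s> B </s>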
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: the denoised frames as NumPy arrays or a torch tensor."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
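# Example invocation through the fire-generated CLI (file names are illustrative
# only, not taken from the original script):
#   python rouge_cli.py predictions.txt gold.txt --save_path=rouge.json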
if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
import unittest

from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66],
                [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66] + [0] * 63,
                [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66] + [0] * 84,
            ],
            "attention_mask": [
                [1] * 96,
                [1] * 33 + [0] * 63,
                [1] * 12 + [0] * 84,
            ],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
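# Worked example (hand-computed, not part of the original file): for edges
# {0->1, w=2}, {0->2, w=4}, {1->2, w=1} with source 0,
# bellman_ford(graph, 3, 3, 0) relaxes to distance = [0.0, 2.0, 3.0]:
# vertex 2 is cheaper via 0->1->2 (2 + 1) than via the direct edge (4).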
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string into camelCase (or PascalCase when ``use_pascal`` is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
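    # Direct sanity checks (examples of mine, not from the original file):
    assert snake_to_camel_case("foo_bar_baz") == "fooBarBaz"
    assert snake_to_camel_case("foo_bar_baz", use_pascal=True) == "FooBarBaz"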
| 46 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 41 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
__a : Optional[Any] = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__a : Tuple = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__a : Optional[int] = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__a : str = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__a : int = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4 )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F"upernet-convnext-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
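# Example invocation (illustrative; the script file name is my placeholder):
#   python convert_upernet_checkpoint.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub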
| 47 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
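# The behavior under test, in miniature (sketch of mine, not from the original file):
# lock_a = FileLock("foo.lock")
# lock_b = FileLock("foo.lock")
# with lock_a.acquire():
#     lock_b.acquire(timeout=0.01)  # raises Timeout while lock_a is held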
| 41 | 0 |
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode bytes into an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
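    # Round-trip sanity check (example of mine, not from the original file):
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"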
| 48 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build model inputs from a conversation history for chat-style generation."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
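# Illustrative usage (assumes the public EleutherAI checkpoint is reachable):
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# tokenizer("Hello world")["input_ids"]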
| 49 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the indentation of ``line``."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split ``code`` into blocks of the given indent level, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so the resulting sort ignores case and underscores."""
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of objects following isort rules: constants first, classes second, functions last."""
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Sort the objects listed inside a single import statement."""
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the ``_import_structure`` blocks of one init file (rewrite it unless ``check_only``)."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run ``sort_imports`` on every __init__.py under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
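# Typical invocations (illustrative; the file name is my placeholder):
#   python utils/custom_init_isort.py --check_only   # CI-style check, raises on unsorted inits
#   python utils/custom_init_isort.py                # rewrite the init files in place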
| 41 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" ,add_special_tokens=_lowerCAmelCase ) ,[0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" ,add_special_tokens=_lowerCAmelCase ) ,[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] ,)
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.tokenizer_class.from_pretrained("""roberta-base""" )
lowerCamelCase__ = tokenizer.encode("""sequence builders""" ,add_special_tokens=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(
"""sequence builders""" ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(
"""sequence builders""" ,"""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ,_lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = """Encode this sequence."""
lowerCamelCase__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_lowerCAmelCase ,_lowerCAmelCase )
# Testing spaces after special tokens
lowerCamelCase__ = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase )} ) # mask token has a left space
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
lowerCamelCase__ = """Encode <mask> sequence"""
lowerCamelCase__ = """Encode <mask>sequence"""
lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase )
lowerCamelCase__ = encoded.index(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase )
lowerCamelCase__ = encoded.index(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = self.tokenizer_class.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = """A, <mask> AllenNLP sentence."""
lowerCamelCase__ = tokenizer_r.encode_plus(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase )
lowerCamelCase__ = tokenizer_p.encode_plus(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_lowerCAmelCase ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
_lowerCAmelCase ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def UpperCamelCase_ ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase )
lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] ,_lowerCAmelCase )
self.assertEqual(post_processor_state["""add_prefix_space"""] ,_lowerCAmelCase )
self.assertEqual(post_processor_state["""trim_offsets"""] ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCamelCase__ = F'''{text_of_1_token} {text_of_1_token}'''
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,)
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,)
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(_lowerCAmelCase ), len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,)
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(_lowerCAmelCase ), len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,)
lowerCamelCase__ = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(_lowerCAmelCase ) + 1, 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,)
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(_lowerCAmelCase ), 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,)
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(_lowerCAmelCase ), 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,)
| 50 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 41 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 51 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
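# Example invocation (illustrative; the script name and checkpoint path are my placeholders):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned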
| 41 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
| 52 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-indexed line of ``base_exp.txt`` whose base**exponent value is largest."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
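# Why log10 works (worked example of mine): comparing x1*log10(a1) with
# x2*log10(a2) compares a1**x1 with a2**x2 without computing the huge powers.
# For instance 7*log10(3) ~= 3.3398 exceeds 11*log10(2) ~= 3.3113, matching
# 3**7 = 2187 > 2**11 = 2048.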
if __name__ == "__main__":
print(solution())
| 41 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: do one pass, then recurse on the unsorted prefix."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i + 1], list_data[i] = list_data[i], list_data[i + 1]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
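    # Direct check (example of mine, not from the original file):
    assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]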
| 53 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase = {0: '''batch'''}
__lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super().outputs
else:
__lowercase = super(lowercase__ ,self ).outputs
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# Generate decoder inputs
__lowercase = seq_length if not self.use_past else 1
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase = dict(**lowercase__ ,**lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
__lowercase = common_inputs['''decoder_input_ids'''].shape[1]
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = decoder_seq_length + 3
__lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 )
__lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase = self.num_layers
__lowercase = min(lowercase__ ,lowercase__ )
__lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers
__lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
__lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ ,lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase , __lowercase = self.num_layers
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = common_inputs['''attention_mask'''].dtype
__lowercase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 )
__lowercase = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = tokenizer.num_special_tokens_to_add(lowercase__ )
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
elif self.task == "causal-lm":
__lowercase = self._generate_dummy_inputs_for_causal_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
else:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
__lowercase = super(lowercase__ ,self )._flatten_past_key_values_(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
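# The dummy past_key_values built above follow the standard cache layout: one
# (self-attn key, self-attn value, cross-attn key, cross-attn value) tuple per
# decoder layer, each of shape (batch, heads, seq_len, head_dim). A standalone
# sketch of that shape logic; all dimension numbers here are illustrative
# assumptions, not values from the config above. The "+ 3" on the decoder
# length mirrors the padding used in the dummy-input generator.
import torch

batch, num_heads, hidden_size, num_layers = 2, 8, 512, 6
encoder_seq_len, decoder_seq_len = 16, 4
head_dim = hidden_size // num_heads
encoder_shape = (batch, num_heads, encoder_seq_len, head_dim)
decoder_shape = (batch, num_heads, decoder_seq_len + 3, head_dim)
past_key_values = [
    (
        torch.zeros(decoder_shape),  # decoder self-attention key
        torch.zeros(decoder_shape),  # decoder self-attention value
        torch.zeros(encoder_shape),  # cross-attention key
        torch.zeros(encoder_shape),  # cross-attention value
    )
    for _ in range(num_layers)
]
print(past_key_values[0][0].shape)  # torch.Size([2, 8, 7, 64])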
| 41 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( __lowercase , unittest.TestCase ):
_snake_case =MgpstrTokenizer
_snake_case =False
_snake_case ={}
_snake_case =False
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# fmt: off
UpperCAmelCase_ =["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
UpperCAmelCase_ =dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
UpperCAmelCase_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
def lowerCAmelCase__ ( self: Union[str, Any] , **_lowerCAmelCase: Tuple ) -> str:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ ="tester"
UpperCAmelCase_ ="tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode([special_token] , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_input_output_texts(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertNotEqual(len(_lowerCAmelCase ) , 0 )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(text_a.replace(" " , "" ) , _lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def lowerCAmelCase__ ( self: str ) -> Union[str, Any]:
'''simple docstring'''
pass
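# The encode/decode round trip these tests rely on can be reproduced with a
# plain character-level vocabulary. This is a simplified stand-in for the
# MGP-STR tokenizer, not its actual implementation.
vocab = {ch: i for i, ch in enumerate("abcdefghijklmnopqrstuvwxyz")}
inv_vocab = {i: ch for ch, i in vocab.items()}

def char_encode(text):
    return [vocab[ch] for ch in text.lower() if ch in vocab]

def char_decode(ids):
    return "".join(inv_vocab[i] for i in ids)

assert char_decode(char_encode("tester")) == "tester"
print(char_encode("tester"))  # [19, 4, 18, 19, 4, 17]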
| 54 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """simple docstring"""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
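# Worked example for the functions above: find n with n = 1 (mod 5) and
# n = 2 (mod 7). Since gcd(5, 7) = 1 the answer is unique modulo 35.
n = chinese_remainder_theorem(5, 1, 7, 2)
assert n % 5 == 1 and n % 7 == 2
print(n)  # 16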
| 41 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : str ,A : int ,A : int=2 ,A : Optional[Any]=3 ,A : Dict=4 ,A : Optional[int]=2 ,A : Union[str, Any]=7 ,A : List[str]=True ,A : Union[str, Any]=True ,A : Optional[int]=True ,A : Optional[int]=True ,A : Tuple=99 ,A : Optional[int]=36 ,A : Dict=3 ,A : str=4 ,A : Optional[Any]=37 ,A : Dict="gelu" ,A : Dict=0.1 ,A : Union[str, Any]=0.1 ,A : Union[str, Any]=5_12 ,A : Any=16 ,A : Union[str, Any]=2 ,A : List[Any]=0.02 ,A : List[Any]=6 ,A : Optional[int]=6 ,A : List[Any]=3 ,A : Union[str, Any]=4 ,A : Tuple=None ,A : List[str]=10_00 ,):
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = patch_size
__A = text_seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = coordinate_size
__A = shape_size
__A = num_labels
__A = num_choices
__A = scope
__A = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__A = text_seq_length
__A = (image_size // patch_size) ** 2 + 1
__A = self.text_seq_length + self.image_seq_length
def UpperCamelCase_ ( self : int ):
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
__A = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__A = bbox[i, j, 3]
__A = bbox[i, j, 1]
__A = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__A = bbox[i, j, 2]
__A = bbox[i, j, 0]
__A = t
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.text_seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
__A = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase_ ( self : Optional[int] ,A : List[str] ,A : Any ,A : Dict ,A : List[Any] ,A : Optional[int] ,A : Any ,A : Dict ,A : List[Any] ):
__A = LayoutLMvaModel(config=A )
model.to(A )
model.eval()
# text + image
__A = model(A ,pixel_values=A )
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A )
__A = model(A ,bbox=A ,pixel_values=A ,token_type_ids=A )
__A = model(A ,bbox=A ,pixel_values=A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
__A = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__A = model(pixel_values=A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] ,A : Dict ,A : List[str] ,A : Any ,A : List[Any] ,A : Any ,A : Any ,A : Dict ,A : Optional[Any] ):
__A = self.num_labels
__A = LayoutLMvaForSequenceClassification(A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : str ,A : Optional[Any] ,A : Dict ,A : str ,A : Tuple ,A : Union[str, Any] ,A : List[Any] ,A : Any ,A : Union[str, Any] ):
__A = self.num_labels
__A = LayoutLMvaForTokenClassification(config=A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : int ,A : str ,A : List[str] ,A : int ,A : List[str] ,A : List[str] ,A : Dict ):
__A = LayoutLMvaForQuestionAnswering(config=A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def UpperCamelCase_ ( self : str ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case_ = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : str ,A : Any ,A : Any ,A : Tuple ,A : List[Any] ,A : Optional[Any] ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = LayoutLMvaModelTester(self )
__A = ConfigTester(self ,config_class=A ,hidden_size=37 )
def UpperCamelCase_ ( self : List[Any] ,A : int ,A : List[str] ,A : Dict=False ):
__A = copy.deepcopy(A )
if model_class in get_values(A ):
__A = {
k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
if isinstance(A ,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A ):
__A = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in get_values(A ):
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in [
*get_values(A ),
]:
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in [
*get_values(A ),
]:
__A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=A ,)
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : str ):
__A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A = type
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def UpperCamelCase_ ( self : str ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = LayoutLMvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Any ):
return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Dict ):
__A = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(A )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="pt" ).pixel_values.to(A )
__A = torch.tensor([[1, 2]] )
__A = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__A = model(
input_ids=input_ids.to(A ) ,bbox=bbox.to(A ) ,pixel_values=pixel_values.to(A ) ,)
# verify the logits
__A = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape ,A )
__A = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,A ,atol=1E-4 ) )
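# The per-element loop in prepare_config_and_inputs that makes each random
# bbox "legal" (x0 <= x1, y0 <= y1) can be vectorized. A sketch, assuming the
# last axis is ordered (x0, y0, x1, y1) as in the test above.
import torch

bbox = torch.randint(0, 1000, (2, 5, 4))
x_min = torch.minimum(bbox[..., 0], bbox[..., 2])
x_max = torch.maximum(bbox[..., 0], bbox[..., 2])
y_min = torch.minimum(bbox[..., 1], bbox[..., 3])
y_max = torch.maximum(bbox[..., 1], bbox[..., 3])
legal_bbox = torch.stack([x_min, y_min, x_max, y_max], dim=-1)
assert (legal_bbox[..., 2] >= legal_bbox[..., 0]).all()
assert (legal_bbox[..., 3] >= legal_bbox[..., 1]).all()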
| 55 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
__lowercase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , A__ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ):
pass
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , A__ ) is None
with patch_submodule(_test_patching , '''len''' , A__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_start_and_stop_mock__'''
__lowercase = patch_submodule(_test_patching , '''open''' , A__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowercase = '''__test_patch_submodule_successive_join__'''
__lowercase = '''__test_patch_submodule_successive_dirname__'''
__lowercase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ):
pass
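# A minimal context-manager sketch of the attribute-patching idea these tests
# cover. This is a simplified stand-in for datasets' patch_submodule: it
# patches a single attribute on one module and cannot handle dotted paths or
# renamed imports, which is exactly what patch_submodule adds on top.
import os
from contextlib import contextmanager

@contextmanager
def patch_attribute(module, name, new_value):
    sentinel = object()
    old_value = getattr(module, name, sentinel)
    setattr(module, name, new_value)
    try:
        yield
    finally:
        if old_value is sentinel:
            delattr(module, name)
        else:
            setattr(module, name, old_value)

with patch_attribute(os, "sep", "!"):
    assert os.sep == "!"
assert os.sep in ("/", "\\")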
| 41 | 0 |
'''simple docstring'''
def capitalized_variants(txt: str) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
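# Example outputs for the function above (the name is chosen here for
# readability): every variant of the input with exactly one alphabetic
# character upper-cased.
assert capitalized_variants("abc") == ["Abc", "aBc", "abC"]
assert capitalized_variants("a1b") == ["A1b", "a1B"]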
| 56 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ):
__lowercase = NezhaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,):
__lowercase = True
__lowercase = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,)
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
__lowercase = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ):
__lowercase = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
__lowercase = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = self.num_choices
__lowercase = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : List[str] = True
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ):
__lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = NezhaModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = NezhaModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=lowercase__ )
__lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
__lowercase = torch.jit.trace(
lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) )
__lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ )
loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
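# The TorchScript check in the test class above traces a model, saves it,
# reloads it, and re-runs it. The same roundtrip on a toy module, as a
# self-contained sketch:
import os
import tempfile

import torch

class Toy(torch.nn.Module):
    def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        return input_ids * attention_mask

model = Toy().eval()
example = (torch.ones(1, 6), torch.ones(1, 6))
traced = torch.jit.trace(model, example)
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "toy.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")
assert torch.allclose(loaded(*example), model(*example))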
| 41 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=9_9 , _lowerCamelCase=3_2 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=1_6 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=4 , ):
UpperCamelCase_: int = parent
UpperCamelCase_: List[str] = batch_size
UpperCamelCase_: List[str] = seq_length
UpperCamelCase_: Union[str, Any] = is_training
UpperCamelCase_: Optional[int] = use_attention_mask
UpperCamelCase_: List[Any] = use_token_type_ids
UpperCamelCase_: Optional[Any] = use_labels
UpperCamelCase_: List[str] = vocab_size
UpperCamelCase_: Optional[int] = hidden_size
UpperCamelCase_: str = num_hidden_layers
UpperCamelCase_: str = num_attention_heads
UpperCamelCase_: List[Any] = intermediate_size
UpperCamelCase_: Optional[Any] = hidden_act
UpperCamelCase_: int = hidden_dropout_prob
UpperCamelCase_: Any = attention_probs_dropout_prob
UpperCamelCase_: List[Any] = max_position_embeddings
UpperCamelCase_: int = type_vocab_size
UpperCamelCase_: str = type_sequence_label_size
UpperCamelCase_: Union[str, Any] = initializer_range
UpperCamelCase_: Optional[Any] = num_choices
def _a ( self ):
UpperCamelCase_: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_: List[str] = None
if self.use_attention_mask:
UpperCamelCase_: Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_: List[Any] = None
if self.use_token_type_ids:
UpperCamelCase_: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_: List[str] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self ):
UpperCamelCase_: Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: List[str] = config_and_inputs
UpperCamelCase_: Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Dict =(
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self ):
UpperCamelCase_: int = FlaxAlbertModelTester(self )
@slow
def _a ( self ):
for model_class_name in self.all_model_classes:
UpperCamelCase_: int = model_class_name.from_pretrained('albert-base-v2' )
UpperCamelCase_: Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCamelCase )
@require_flax
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ):
UpperCamelCase_: Any = FlaxAlbertModel.from_pretrained('albert-base-v2' )
UpperCamelCase_: Union[str, Any] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
UpperCamelCase_: int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCamelCase_: str = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
UpperCamelCase_: Union[str, Any] = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , _lowerCamelCase )
UpperCamelCase_: Any = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
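# The testers above build random inputs with ids_tensor and
# random_attention_mask. A numpy sketch of what such helpers produce; the
# "force one attended token" detail is an assumption of this sketch, not
# copied from the transformers test utilities.
import numpy as np

rng = np.random.default_rng(0)

def make_ids(shape, vocab_size):
    return rng.integers(0, vocab_size, size=shape, dtype=np.int64)

def make_attention_mask(shape):
    mask = rng.integers(0, 2, size=shape, dtype=np.int64)
    mask[:, -1] = 1  # guarantee at least one attended token per row
    return mask

print(make_ids((2, 7), vocab_size=99))
print(make_attention_mask((2, 7)))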
| 57 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KEY''')
lowerCAmelCase__ = TypeVar('''VAL''')
@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class lowercase_ (Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : KEY
SCREAMING_SNAKE_CASE : VAL
class lowercase_ (_Item ):
"""simple docstring"""
def __init__( self : Optional[int] ):
        super().__init__(None, None)
def __bool__( self : List[str] ):
return False
lowerCAmelCase__ = _DeletedItem()
class lowercase_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ):
__lowercase = initial_block_size
__lowercase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__lowercase = capacity_factor
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ):
return hash(lowercase__ ) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ):
return (ind + 1) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ):
__lowercase = self._buckets[ind]
if not stored:
__lowercase = _Item(lowercase__ ,lowercase__ )
self._len += 1
return True
elif stored.key == key:
__lowercase = _Item(lowercase__ ,lowercase__ )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
__lowercase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ):
__lowercase = self._buckets
__lowercase = [None] * new_size
__lowercase = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val )
def SCREAMING_SNAKE_CASE ( self : str ):
self._resize(len(self._buckets ) * 2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self._resize(len(self._buckets ) // 2 )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ):
__lowercase = self._get_bucket_index(lowercase__ )
for _ in range(len(self._buckets ) ):
yield ind
__lowercase = self._get_next_ind(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
for ind in self._iterate_buckets(lowercase__ ):
if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ):
break
def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(lowercase__ ,lowercase__ )
def __delitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
raise KeyError(lowercase__ )
if item is _deleted:
continue
if item.key == key:
__lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase__ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
__lowercase = ''' ,'''.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
| 41 | 0 |
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
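# The hand-rolled cache above can also be expressed with functools.lru_cache,
# since the state is fully captured by the three arguments. A sketch of the
# same recurrence; the 4-day value of 43 comes from the problem statement.
from functools import lru_cache

@lru_cache(maxsize=None)
def prize_strings(days, absent=0, late=0):
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)   # absent today
        + prize_strings(days - 1, absent, 0)       # on time today
    )

assert prize_strings(4) == 43
print(prize_strings(30))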
| 58 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ,**lowercase__ : Tuple ):
super().__init__(**lowercase__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,'''vision''' )
self.check_model_type(lowercase__ )
def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,):
if "text_queries" in kwargs:
__lowercase = kwargs.pop('''text_queries''' )
if isinstance(lowercase__ ,(str, Image.Image) ):
__lowercase = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__lowercase = image
__lowercase = super().__call__(lowercase__ ,**lowercase__ )
return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
__lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist()
__lowercase = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
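# A minimal usage sketch (the checkpoint below is one public zero-shot detector,
# shown only as an example; any OWL-ViT-style model should work):
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#       threshold=0.1,
#   )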
| 41 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
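# Sketch of what the _LazyModule registration buys (attribute access, not the
# import statement itself, triggers loading of the heavy backends):
#   from transformers.models import gpt_neo
#   gpt_neo.GPTNeoConfig  # cheap: only the config module is imported
#   gpt_neo.GPTNeoModel   # first access imports the torch-backed modeling file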
| 59 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """simple docstring"""
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length")
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
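# A minimal usage sketch (PipelineTool instances are callable; the
# facebook/bart-large-mnli checkpoint is downloaded lazily on first use):
#   classifier = TextClassificationTool()
#   classifier("This new API is a joy to use!", labels=["positive", "negative"])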
| 41 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type)
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_video_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        '''simple docstring'''
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1])
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1])
    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """simple docstring"""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self):
        '''simple docstring'''
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
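# These suites are collected by pytest; the @slow integration tests run only
# with RUN_SLOW=1 (the path below is the conventional transformers layout,
# shown as an example):
#   RUN_SLOW=1 pytest tests/models/timesformer/test_modeling_timesformer.py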
| 60 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """simple docstring"""
    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)
    def _parent(self, i: int):
        return int((i - 1) / 2) if i > 0 else None
    def _left(self, i: int):
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None
    def _right(self, i: int):
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None
    def _swap(self, i: int, j: int):
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    def _cmp(self, i: int, j: int):
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent(self, i: int):
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent
    def _heapify_up(self, index: int):
        curr_pos = index
        parent = self._parent(curr_pos)
        while parent is not None and not self._cmp(curr_pos, parent):
            self._swap(curr_pos, parent)
            curr_pos, parent = parent, self._parent(parent)
    def _heapify_down(self, index: int):
        curr_pos = index
        valid_parent = self._get_valid_parent(curr_pos)
        while valid_parent != curr_pos:
            self._swap(curr_pos, valid_parent)
            curr_pos, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
    def update_item(self, item: int, item_value: int):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)
    def delete_item(self, item: int):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)
    def insert_item(self, item: int, item_value: int):
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)
    def get_top(self):
        return self.arr[0] if self.size else None
    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
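# A minimal usage sketch (names as fixed above; _cmp makes this a max-heap on
# key(value) by default -- pass key=lambda x: -x for min-heap behavior):
#   heap = Heap()
#   heap.insert_item("a", 3)
#   heap.insert_item("b", 7)
#   heap.get_top()      # -> ["b", 7]
#   heap.extract_top()  # removes and returns ["b", 7]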
def test_heap() -> None:
    """simple docstring"""
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 41 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def a ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def a ( self : List[Any] ) -> Dict:
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized")
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized")
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2])
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 61 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """simple docstring"""
    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")
    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")
    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")
    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")
    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")
    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")
    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")
    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")
    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")
    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")
    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")
    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    """simple docstring"""
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback])
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 41 | 0 |
import argparse
import os
import re
import packaging.version
snake_case = """examples/"""
snake_case = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
snake_case = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
snake_case = """README.md"""
def update_version_in_file(fname, version, pattern):
    """simple docstring"""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc", "https://huggingface.co/docs/transformers/model_doc")
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """simple docstring"""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
    parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
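# Typical invocations (assuming this script lives at utils/release.py, its
# conventional location in the transformers repo):
#   python utils/release.py                 # pre-release: bump to the release version
#   python utils/release.py --patch         # patch release from a release branch
#   python utils/release.py --post_release  # move back to a .dev0 version afterwards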
| 62 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """simple docstring"""
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """simple docstring"""
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """simple docstring"""
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
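# A minimal shape sketch (hypothetical toy usage; in practice the model is
# usually loaded with FlaxControlNetModel.from_pretrained on a real checkpoint):
#   import jax
#   controlnet = FlaxControlNetModel(sample_size=32)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))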
| 41 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""
    destination_vertex: int
    weight: int
class AdjacencyList:
    """simple docstring"""
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])
    @property
    def size(self):
        return self._size
    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")
        self._graph[from_vertex].append(Edge(to_vertex, weight))
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : int , __lowercase : int ) -> int | None:
__UpperCAmelCase : List[str] = deque([start_vertex] )
__UpperCAmelCase : list[int | None] = [None] * self.size
__UpperCAmelCase : Optional[int] = 0
while queue:
__UpperCAmelCase : Tuple = queue.popleft()
__UpperCAmelCase : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__UpperCAmelCase : Any = current_distance + edge.weight
__UpperCAmelCase : Tuple = distances[edge.destination_vertex]
if (
isinstance(__lowercase , __lowercase )
and new_distance >= dest_vertex_distance
):
continue
__UpperCAmelCase : Optional[int] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
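# --- Hedged usage example (added for illustration; not in the original file) ---
def _demo_zero_one_bfs() -> None:
    # 0 --0--> 1 --1--> 2, plus a direct weight-1 edge 0 -> 2; both paths cost 1.
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    assert g.get_shortest_path(0, 2) == 1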
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from a (C, H, W) tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render `text` onto a new PIL image; used to build VQA header images."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render `header` as text and paste it above `image`."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor that flattens images into fixed-count patch sequences for Pix2Struct."""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format=None, **kwargs) -> np.ndarray:
        """Standardize an image with its own mean and (floored) standard deviation."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Convert one or more images into padded sequences of flattened patches."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
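# --- Hedged usage sketch (added for illustration; not in the original module) ---
# Assuming this module is exposed as transformers' Pix2StructImageProcessor; the
# checkpoint name is illustrative.
#
#   from PIL import Image
#   from transformers import Pix2StructImageProcessor
#
#   processor = Pix2StructImageProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=Image.new("RGB", (640, 480), "white"), return_tensors="pt")
#   print(inputs["flattened_patches"].shape)  # (1, max_patches, 2 + patch_h * patch_w * 3)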
| 41 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1_5_3_6,
'junnyu/roformer_chinese_base': 1_5_3_6,
'junnyu/roformer_chinese_char_small': 5_1_2,
'junnyu/roformer_chinese_char_base': 5_1_2,
'junnyu/roformer_small_discriminator': 1_2_8,
'junnyu/roformer_small_generator': 1_2_8,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by the `tokenizers` library, with a custom Jieba pre-tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer is not picklable; swap in a Bert one for serialization.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
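# --- Hedged usage example (added for illustration; not in the original module) ---
#   from transformers import RoFormerTokenizerFast
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   print(tokenizer.tokenize("今天天气非常好。"))  # Jieba-based word segmentation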
| 64 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Circular convolution of two discrete signals via a circulant matrix."""

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # row i of the circulant matrix is the second signal rotated right by i
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
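# --- Hedged usage example (added for illustration; not in the original file) ---
def _demo_circular_convolution() -> None:
    conv = CircularConvolution()
    # circular convolution of [2, 1, 2, -1] and [1, 2, 3, 4]
    print(conv.circular_convolution())  # expected: [10.0, 10.0, 6.0, 14.0]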
if __name__ == "__main__":
doctest.testmod()
| 41 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")

        # Create a deep copy so the original (frozen) template stays untouched.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
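# --- Hedged usage example (added for illustration; not in the original module) ---
# Aligning the template with a concrete dataset schema resolves the ClassLabel:
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification()
#   aligned = template.align_with_features(features)
#   print(aligned.label_schema["labels"].names)  # ['neg', 'pos']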
| 65 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; `frames` holds the generated video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
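# --- Hedged usage sketch (added for illustration; not in the original module) ---
# Typical text-to-video generation with the pipeline exported above; the checkpoint
# name follows the diffusers documentation and is illustrative.
#
#   import torch
#   from diffusers import TextToVideoSDPipeline
#
#   pipe = TextToVideoSDPipeline.from_pretrained(
#       "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#   )
#   frames = pipe("an astronaut riding a horse", num_inference_steps=25).frames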
| 41 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args) -> None:
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                log_alpha = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                # stretched hard-concrete gate: sigmoid, rescale to (l, r), clamp to [0, 1]
                s = torch.sigmoid(log_alpha)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
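# --- Hedged usage note (added for illustration; not in the original script) ---
# Typical invocation (paths are illustrative):
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model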
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
| 66 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return runtime metrics."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """Parse CLI args, run generation, and optionally score the outputs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
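    # Usage for summarization (added illustrative example; paths are assumptions):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization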
run_generate(verbose=True)
| 41 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Runs the same prompt through Stable Diffusion v1.1-v1.4 for side-by-side comparison."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
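# --- Hedged usage sketch (added for illustration; not in the original file) ---
# Loading as a diffusers community pipeline; the `custom_pipeline` name is an assumption.
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   pipe.enable_attention_slicing()
#   output = pipe("a photo of an astronaut riding a horse")
#   images = output.images  # one image per checkpoint v1.1-v1.4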
| 67 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Single-source shortest paths with negative-weight support; raises on negative cycles."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
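# --- Hedged usage example (added for illustration; not in the original file) ---
def _demo_bellman_ford() -> None:
    # Edges: 0->1 (w=4), 0->2 (w=1), 2->1 (w=2); shortest distances from 0 are [0, 3, 1].
    demo_graph = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 1},
        {"src": 2, "dst": 1, "weight": 2},
    ]
    assert bellman_ford(demo_graph, 3, 3, 0) == [0.0, 3.0, 1.0]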
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast DistilBERT tokenizer (backed by the `tokenizers` library), WordPiece-based."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
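# --- Hedged usage example (added for illustration; not in the original module) ---
#   from transformers import DistilBertTokenizerFast
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   encoding = tokenizer("Hello world!", return_tensors="pt")
#   print(encoding["input_ids"])  # [CLS] hello world ! [SEP] token ids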
| 68 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    """Deprecated alias for YolosImageProcessor; emits a FutureWarning on instantiation."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
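# --- Hedged usage note (added for illustration; not in the original module) ---
# New code should instantiate the image processor directly instead of the deprecated class:
#   from transformers import YolosImageProcessor
#   processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")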
| 41 | 0 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
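# --- Hedged usage note (added for illustration; not in the original script) ---
# Typical invocation (the dump folder name is illustrative):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model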
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 69 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # The lock file name is hashed/truncated so it stays within filesystem limits.
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
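# --- Hedged usage sketch (added for illustration; not in the original test module) ---
# The pattern these tests exercise, as application code would use it:
#   lock = FileLock("/tmp/my_job.lock")  # path is illustrative
#   with lock.acquire(timeout=5):
#       ...  # critical section; raises Timeout if another process holds the lock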
| 41 | 0 |
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    """Configuration class for NEZHA models (BERT-like, with functional relative position encoding)."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02,
                 layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3,
                 use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 70 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
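# A toy illustration (not the actual transformers implementation) of the
# lazy-import idea behind _LazyModule: attribute access triggers the real
# submodule import only on first use, keeping `import` of the package cheap.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")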
| 41 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCamelCase = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,"schedulers/" ) )
UpperCAmelCase_ : Tuple = self.diffusers_dir
shutil.copy(
os.path.join(_snake_case ,"src/diffusers/schedulers/scheduling_ddpm.py" ) ,os.path.join(self.diffusers_dir ,"schedulers/scheduling_ddpm.py" ) ,)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ):
UpperCAmelCase_ : Any = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
UpperCAmelCase_ : List[str] = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
UpperCAmelCase_ : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=1_19 )
UpperCAmelCase_ : Union[str, Any] = black.format_str(_snake_case ,mode=_snake_case )
UpperCAmelCase_ : Any = os.path.join(self.diffusers_dir ,"new_code.py" )
with open(_snake_case ,"w" ,newline="\n" ) as f:
f.write(_snake_case )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_snake_case ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=_snake_case )
with open(_snake_case ,"r" ) as f:
self.assertTrue(f.read() ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" ,"DDPMSchedulerOutput" ,REFERENCE_CODE + "\n" ,)
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" ,"DDPMSchedulerOutput" ,_snake_case ,)
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" ,"TestSchedulerOutput" ,re.sub("DDPM" ,"Test" ,_snake_case ) ,)
# Copy consistency with a really long name
UpperCAmelCase_ : str = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' ,f'''{long_class_name}SchedulerOutput''' ,re.sub("Bert" ,_snake_case ,_snake_case ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" ,"TestSchedulerOutput" ,_snake_case ,overwrite_result=re.sub("DDPM" ,"Test" ,_snake_case ) ,)
| 71 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the indentation (leading whitespace) of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _A ( A__ , A__="" , A__=None , A__=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(A__ ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wrap a string-returning `key` function so sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort `objects` following the rules of isort: constants, then classes, then functions."""

    # If no key is provided, we use the identity.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` with the objects inside it sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init file."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains something from `_import_structure` to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
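# A standalone demonstration of the ordering rule implemented by sort_objects
# above: constants (ALL_CAPS) first, classes (Capitalized) second, functions
# (lowercase) last, each group sorted with case and underscores ignored.
_key = lambda s: s.lower().replace("_", "")
objects = ["load_model", "MODEL_LIST", "AutoModel", "_helper"]
constants = sorted((o for o in objects if o.isupper()), key=_key)
classes = sorted((o for o in objects if o[0].isupper() and not o.isupper()), key=_key)
functions = sorted((o for o in objects if not o[0].isupper()), key=_key)
print(constants + classes + functions)  # ['MODEL_LIST', 'AutoModel', '_helper', 'load_model']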
| 41 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 72 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
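# A minimal sketch of the seeding pattern used in get_dummy_inputs above: at
# the time these tests were written, MPS did not support device-local
# torch.Generator objects, so tests seed the global CPU generator there and
# use a per-device generator everywhere else.
import torch


def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


generator = make_generator("cpu", seed=0)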
| 41 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ : Any = 'true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return everything needed to train a basic RegressionModel."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
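# A toy illustration of the duplication problem gather_for_metrics solves:
# distributed samplers pad the dataset so every process sees the same number
# of batches, and a plain gather keeps those padded duplicates, while
# gather_for_metrics drops them again. Numbers below are illustrative.
num_processes, num_samples = 2, 99
per_process = -(-num_samples // num_processes)  # ceil division -> 50
gathered = per_process * num_processes  # 100 samples after padding
print(f"{gathered} gathered, {gathered - num_samples} padded duplicate(s) dropped")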
| 73 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no equivalent in the HF checkpoint."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing its weight with the given embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
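# A minimal sketch of the weight-tying trick behind make_linear_from_emb
# above: the LM head is a bias-free Linear sharing the embedding matrix, so
# output logits are dot products between hidden states and token embeddings.
import torch
from torch import nn

emb = nn.Embedding(10, 4)
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight.data = emb.weight.data
hidden = torch.randn(1, 4)
assert torch.allclose(lm_head(hidden), hidden @ emb.weight.T)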
| 41 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowercase_ = True
except ImportError:
lowercase_ = False
try:
from torch.hub import _get_torch_home
lowercase_ = _get_torch_home()
except ImportError:
lowercase_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
lowercase_ = os.path.join(torch_cache_home, """transformers""")
lowercase_ = """https://cdn.huggingface.co"""
lowercase_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
lowercase_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
lowercase_ = os.path.join(PATH, """config.yaml""")
lowercase_ = os.path.join(PATH, """attributes.txt""")
lowercase_ = os.path.join(PATH, """objects.txt""")
lowercase_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
lowercase_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
lowercase_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
lowercase_ = """pytorch_model.bin"""
lowercase_ = """config.yaml"""
def a__ ( snake_case=OBJECTS , snake_case=ATTRIBUTES ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = []
with open(snake_case ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__SCREAMING_SNAKE_CASE : List[str] = []
with open(snake_case ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = OrderedDict()
with open(snake_case , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = pkl.load(snake_case )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__SCREAMING_SNAKE_CASE : int = ckp.pop(snake_case )
if isinstance(snake_case , np.ndarray ):
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor(snake_case )
else:
assert isinstance(snake_case , torch.tensor ), type(snake_case )
__SCREAMING_SNAKE_CASE : Any = v
return r
class __UpperCamelCase :
"""simple docstring"""
lowerCAmelCase_ = {}
def __init__( self : Dict , _A : dict , _A : str = "root" , _A : str=0 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = name
__SCREAMING_SNAKE_CASE : Dict = level
__SCREAMING_SNAKE_CASE : Tuple = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(_A )
__SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(_A )
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Optional[int] = Config(_A , name=_A , level=level + 1 )
__SCREAMING_SNAKE_CASE : str = v
setattr(self , _A , _A )
__SCREAMING_SNAKE_CASE : Optional[int] = d
def __repr__( self : List[str] ):
"""simple docstring"""
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , _A : List[str] , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = val
__SCREAMING_SNAKE_CASE : Optional[Any] = val
__SCREAMING_SNAKE_CASE : Dict = key.split('''.''' )
__SCREAMING_SNAKE_CASE : List[str] = len(_A ) - 1
__SCREAMING_SNAKE_CASE : Optional[int] = self._pointer
if len(_A ) > 1:
for i, l in enumerate(_A ):
if hasattr(self , _A ) and isinstance(getattr(self , _A ) , _A ):
setattr(getattr(self , _A ) , '''.'''.join(levels[i:] ) , _A )
if l == last_level:
__SCREAMING_SNAKE_CASE : List[str] = val
else:
__SCREAMING_SNAKE_CASE : Optional[int] = pointer[l]
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
return self._pointer
def UpperCAmelCase__ ( self : Any , _A : int , _A : Tuple ):
"""simple docstring"""
with open(F'''{file_name}''' , '''w''' ) as stream:
dump(_A , _A )
def UpperCAmelCase__ ( self : Optional[Any] , _A : List[Any] , _A : Tuple ):
"""simple docstring"""
with open(F'''{file_name}''' , '''w''' ) as stream:
json.dump(_A , _A )
@staticmethod
def UpperCAmelCase__ ( _A : List[Any] ):
"""simple docstring"""
with open(_A ) as stream:
__SCREAMING_SNAKE_CASE : Optional[Any] = load(_A , Loader=_A )
return data
def __str__( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = ''' '''
if self._name != "root":
__SCREAMING_SNAKE_CASE : Optional[Any] = F'''{t * (self._level-1)}{self._name}:\n'''
else:
__SCREAMING_SNAKE_CASE : Optional[int] = ''''''
__SCREAMING_SNAKE_CASE : str = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_A , _A ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(_A ).__name__})\n'''
__SCREAMING_SNAKE_CASE : Optional[Any] = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , _A : str , **_A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(_A , **_A )
return cls(_A )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , _A : str , **_A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('''cache_dir''' , _A )
__SCREAMING_SNAKE_CASE : str = kwargs.pop('''force_download''' , _A )
__SCREAMING_SNAKE_CASE : Any = kwargs.pop('''resume_download''' , _A )
__SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''proxies''' , _A )
__SCREAMING_SNAKE_CASE : Any = kwargs.pop('''local_files_only''' , _A )
if os.path.isdir(_A ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_A , _A )
elif os.path.isfile(_A ) or is_remote_url(_A ):
__SCREAMING_SNAKE_CASE : Dict = pretrained_model_name_or_path
else:
__SCREAMING_SNAKE_CASE : List[str] = hf_bucket_url(_A , filename=_A , use_cdn=_A )
try:
# Load from URL or cache if already cached
__SCREAMING_SNAKE_CASE : str = cached_path(
_A , cache_dir=_A , force_download=_A , proxies=_A , resume_download=_A , local_files_only=_A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__SCREAMING_SNAKE_CASE : str = Config.load_yaml(_A )
except EnvironmentError:
__SCREAMING_SNAKE_CASE : Optional[int] = '''Can\'t load config for'''
raise EnvironmentError(_A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(_A ), kwargs
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load('''dump.pt''' , map_location=in_tensor.device )
__SCREAMING_SNAKE_CASE : int = in_tensor.numpy()
__SCREAMING_SNAKE_CASE : List[Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case , snake_case , rtol=0.01 , atol=0.1 ), (
F'''{sum([1 for x in np.isclose(snake_case , snake_case , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = urlparse(snake_case )
return parsed.scheme in ("http", "https")
def a__ ( snake_case , snake_case , snake_case=True ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__SCREAMING_SNAKE_CASE : Optional[Any] = '''/''' not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def a__ ( snake_case , snake_case , snake_case=None , snake_case=0 , snake_case=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case , snake_case ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case , snake_case ) for k, v in user_agent.items() )
elif isinstance(snake_case , snake_case ):
ua += "; " + user_agent
__SCREAMING_SNAKE_CASE : Dict = {'''user-agent''': ua}
if resume_size > 0:
__SCREAMING_SNAKE_CASE : List[Any] = '''bytes=%d-''' % (resume_size,)
__SCREAMING_SNAKE_CASE : Union[str, Any] = requests.get(snake_case , stream=snake_case , proxies=snake_case , headers=snake_case )
if response.status_code == 416: # Range not satisfiable
return
__SCREAMING_SNAKE_CASE : str = response.headers.get('''Content-Length''' )
__SCREAMING_SNAKE_CASE : Dict = resume_size + int(snake_case ) if content_length is not None else None
__SCREAMING_SNAKE_CASE : Dict = tqdm(
unit='''B''' , unit_scale=snake_case , total=snake_case , initial=snake_case , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case ) )
temp_file.write(snake_case )
progress.close()
def a__ ( snake_case , snake_case=None , snake_case=False , snake_case=None , snake_case=10 , snake_case=False , snake_case=None , snake_case=False , ):
"""simple docstring"""
if cache_dir is None:
__SCREAMING_SNAKE_CASE : Optional[int] = TRANSFORMERS_CACHE
if isinstance(snake_case , snake_case ):
__SCREAMING_SNAKE_CASE : Optional[Any] = str(snake_case )
os.makedirs(snake_case , exist_ok=snake_case )
__SCREAMING_SNAKE_CASE : Tuple = None
if not local_files_only:
try:
__SCREAMING_SNAKE_CASE : List[Any] = requests.head(snake_case , allow_redirects=snake_case , proxies=snake_case , timeout=snake_case )
if response.status_code == 200:
__SCREAMING_SNAKE_CASE : str = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__SCREAMING_SNAKE_CASE : Tuple = url_to_filename(snake_case , snake_case )
# get cache path to put the file
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(snake_case , snake_case )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case ):
return cache_path
else:
__SCREAMING_SNAKE_CASE : List[str] = [
file
for file in fnmatch.filter(os.listdir(snake_case ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case ) > 0:
return os.path.join(snake_case , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__SCREAMING_SNAKE_CASE : List[str] = cache_path + '''.lock'''
with FileLock(snake_case ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__SCREAMING_SNAKE_CASE : Optional[int] = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case , '''a+b''' ) as f:
yield f
__SCREAMING_SNAKE_CASE : Dict = _resumable_file_manager
if os.path.exists(snake_case ):
__SCREAMING_SNAKE_CASE : Optional[Any] = os.stat(snake_case ).st_size
else:
__SCREAMING_SNAKE_CASE : str = 0
else:
__SCREAMING_SNAKE_CASE : Dict = partial(tempfile.NamedTemporaryFile , dir=snake_case , delete=snake_case )
__SCREAMING_SNAKE_CASE : Tuple = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case , temp_file.name , )
http_get(
snake_case , snake_case , proxies=snake_case , resume_size=snake_case , user_agent=snake_case , )
os.replace(temp_file.name , snake_case )
__SCREAMING_SNAKE_CASE : Dict = {'''url''': url, '''etag''': etag}
__SCREAMING_SNAKE_CASE : List[str] = cache_path + '''.json'''
with open(snake_case , '''w''' ) as meta_file:
json.dump(snake_case , snake_case )
return cache_path
def a__ ( snake_case , snake_case=None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = url.encode('''utf-8''' )
__SCREAMING_SNAKE_CASE : Tuple = shaaaa(snake_case )
__SCREAMING_SNAKE_CASE : str = url_hash.hexdigest()
if etag:
__SCREAMING_SNAKE_CASE : Any = etag.encode('''utf-8''' )
__SCREAMING_SNAKE_CASE : Optional[int] = shaaaa(snake_case )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def a__ ( snake_case , snake_case=None , snake_case=False , snake_case=None , snake_case=False , snake_case=None , snake_case=False , snake_case=False , snake_case=False , ):
"""simple docstring"""
if cache_dir is None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = TRANSFORMERS_CACHE
if isinstance(snake_case , snake_case ):
__SCREAMING_SNAKE_CASE : Optional[int] = str(snake_case )
if isinstance(snake_case , snake_case ):
__SCREAMING_SNAKE_CASE : List[Any] = str(snake_case )
if is_remote_url(snake_case ):
# URL, so get it from the cache (downloading if necessary)
__SCREAMING_SNAKE_CASE : Optional[Any] = get_from_cache(
snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , user_agent=snake_case , local_files_only=snake_case , )
elif os.path.exists(snake_case ):
# File, and it exists.
__SCREAMING_SNAKE_CASE : Optional[Any] = url_or_filename
elif urlparse(snake_case ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case ) )
if extract_compressed_file:
if not is_zipfile(snake_case ) and not tarfile.is_tarfile(snake_case ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.split(snake_case )
__SCREAMING_SNAKE_CASE : Optional[Any] = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , snake_case )
if os.path.isdir(snake_case ) and os.listdir(snake_case ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__SCREAMING_SNAKE_CASE : Dict = output_path + '''.lock'''
with FileLock(snake_case ):
shutil.rmtree(snake_case , ignore_errors=snake_case )
os.makedirs(snake_case )
if is_zipfile(snake_case ):
with ZipFile(snake_case , '''r''' ) as zip_file:
zip_file.extractall(snake_case )
zip_file.close()
elif tarfile.is_tarfile(snake_case ):
__SCREAMING_SNAKE_CASE : Optional[Any] = tarfile.open(snake_case )
tar_file.extractall(snake_case )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case ) )
return output_path_extracted
return output_path
def a__ ( snake_case , snake_case="," ):
"""simple docstring"""
assert isinstance(snake_case , snake_case )
if os.path.isfile(snake_case ):
with open(snake_case ) as f:
__SCREAMING_SNAKE_CASE : Optional[int] = eval(f.read() )
else:
__SCREAMING_SNAKE_CASE : int = requests.get(snake_case )
try:
__SCREAMING_SNAKE_CASE : str = requests.json()
except Exception:
__SCREAMING_SNAKE_CASE : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval(snake_case )
except Exception:
__SCREAMING_SNAKE_CASE : Any = data.split('''\n''' )
req.close()
return data
def get_image_from_url(url):
    """Download an image from `url` and return it as a numpy array."""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case )
with open(snake_case , '''rb''' ) as stream:
__SCREAMING_SNAKE_CASE : str = pkl.load(snake_case )
__SCREAMING_SNAKE_CASE : int = weights.pop('''model''' )
__SCREAMING_SNAKE_CASE : str = {}
for k, v in model.items():
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(snake_case )
if "running_var" in k:
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([0] )
__SCREAMING_SNAKE_CASE : Optional[int] = k.replace('''running_var''' , '''num_batches_tracked''' )
__SCREAMING_SNAKE_CASE : Any = zero
return new
def get_demo_path():
    """Print the absolute path to the demo notebook shipped next to this package."""
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def a__ ( snake_case , snake_case="RGB" ):
"""simple docstring"""
assert isinstance(snake_case , snake_case )
if os.path.isfile(snake_case ):
__SCREAMING_SNAKE_CASE : List[Any] = cva.imread(snake_case )
else:
__SCREAMING_SNAKE_CASE : List[Any] = get_image_from_url(snake_case )
assert img is not None, F'''could not connect to: {im}'''
__SCREAMING_SNAKE_CASE : int = cva.cvtColor(snake_case , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__SCREAMING_SNAKE_CASE : Dict = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    """Yield consecutive slices of `images` of length `batch`."""
    return (images[i : i + batch] for i in range(0, len(images), batch))
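# A quick, self-contained illustration of the batching pattern implemented by
# chunk() above: consecutive slices, with a shorter final slice when the
# length does not divide evenly.
def chunk_demo(items, batch=1):
    return (items[i : i + batch] for i in range(0, len(items), batch))


print(list(chunk_demo([1, 2, 3, 4, 5], batch=2)))  # [[1, 2], [3, 4], [5]]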
| 74 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-indexed line whose base**exponent is largest.

    The numbers are far too large to evaluate directly, so we compare
    exponent * log10(base) instead, which preserves the ordering because
    log10 is monotonic.
    """
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        base, exponent = list(map(int, line.split(",")))
        if exponent * log10(base) > largest:
            largest = exponent * log10(base)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
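# Why the logarithm comparison in solution() works: a**x and b**y are far too
# large to materialize, but log10 is monotonic, so a**x > b**y exactly when
# x * log10(a) > y * log10(b). A tiny check with small numbers:
from math import log10

assert (2**10 > 3**6) == (10 * log10(2) > 6 * log10(3))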
| 41 | 0 |
'''simple docstring'''
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Build the affine transform mapping `pt1` onto `pt2` and apply it to `img`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # NOTE: the point-set pairings below are a plausible reconstruction; the
    # obfuscated source collapsed the distinct pts* names into one identifier.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
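# A small sketch of what cv2.getAffineTransform computes (a hand-rolled
# re-derivation for illustration, not the OpenCV internals): the 2x3 matrix M
# such that M @ [x, y, 1]^T maps each of the three source points onto its
# destination point.
import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
A = np.hstack([src, np.ones((3, 1), np.float32)])  # rows are [x, y, 1]
M = np.linalg.solve(A, dst).T  # 2x3 affine matrix
assert np.allclose(A @ M.T, dst, atol=1e-3)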
| 75 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BlenderbotSmall model."""

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for BlenderbotSmall."""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase = {0: '''batch'''}
__lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super().outputs
else:
__lowercase = super(lowercase__ ,self ).outputs
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# Generate decoder inputs
__lowercase = seq_length if not self.use_past else 1
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase = dict(**lowercase__ ,**lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
__lowercase = common_inputs['''decoder_input_ids'''].shape[1]
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = decoder_seq_length + 3
__lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 )
__lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase = self.num_layers
__lowercase = min(lowercase__ ,lowercase__ )
__lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers
__lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
__lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ ,lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase , __lowercase = self.num_layers
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = common_inputs['''attention_mask'''].dtype
__lowercase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 )
__lowercase = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = tokenizer.num_special_tokens_to_add(lowercase__ )
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
elif self.task == "causal-lm":
__lowercase = self._generate_dummy_inputs_for_causal_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
else:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
__lowercase = super(lowercase__ ,self )._flatten_past_key_values_(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
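# --- Hedged illustration (not part of the original file) ---
# The dummy past_key_values built above follow the usual transformer cache
# convention: one (key, value) pair per layer, each of shape
# (batch, num_heads, past_sequence_length, hidden_size // num_heads).
# The toy numbers below are assumptions for illustration only.
import torch

batch, num_heads, past_len, hidden_size = 2, 4, 7, 64
cache_shape = (batch, num_heads, past_len, hidden_size // num_heads)
past_key_values = [(torch.zeros(cache_shape), torch.zeros(cache_shape)) for _ in range(6)]
assert past_key_values[0][0].shape == (2, 4, 7, 16)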
| 41 | 0 |
"""simple docstring"""
import numpy as np
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
return np.where(vector > 0 , __UpperCamelCase , (alpha * (np.exp(__UpperCamelCase ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
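    # --- Hedged usage sketch (not in the original file) ---
    # Positive inputs pass through; negative inputs saturate towards -alpha.
    print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), 0.3))
    # -> approximately [ 2.3  0.6  -0.2594  -0.2933]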
| 76 |
"""Chinese remainder theorem, built on the extended Euclidean algorithm."""
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a * x + b * y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x = r1 (mod n1) and x = r2 (mod n2) for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same system of congruences, solved with modular inverses instead."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
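    # --- Hedged usage sketch (not in the original file) ---
    # x = 1 (mod 5) and x = 3 (mod 7) has the unique solution 31 modulo 35.
    print(chinese_remainder_theorem(5, 1, 7, 3))   # 31
    print(chinese_remainder_theorem2(5, 1, 7, 3))  # 31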
| 41 | 0 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase ) -> bool:
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError("check_bouncy() accepts only integer arguments" )
__UpperCAmelCase : Optional[int] = str(UpperCamelCase )
__UpperCAmelCase : List[Any] = "".join(sorted(UpperCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _UpperCamelCase ( UpperCamelCase = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 100:
raise ValueError("solution() only accepts values from 0 to 100" )
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
while True:
if check_bouncy(UpperCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
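    # --- Hedged usage sketch (not in the original file) ---
    print(check_bouncy(155349))  # True: digits are neither sorted nor reverse-sorted
    print(check_bouncy(134468))  # False: digits never decrease
    print(solution(50))          # 538: the first point where half of all numbers are bouncy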
| 77 |
"""Tests for datasets.utils.patching.patch_submodule."""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
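# --- Hedged usage sketch (not part of the original test file) ---
# The two ways to apply a patch, condensed from the tests above.
def _sketch_patch_submodule_usage():
    sketch_mock = "__sketch_mock__"
    # as a context manager
    with patch_submodule(_test_patching, "os.path.join", sketch_mock):
        assert _test_patching.os.path.join is sketch_mock
    # as an explicit start()/stop() pair
    patch = patch_submodule(_test_patching, "os.path.join", sketch_mock)
    patch.start()
    assert _test_patching.os.path.join is sketch_mock
    patch.stop()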
| 41 | 0 |
"""Tests for generic transformers utilities: imports, ContextManagers and find_labels."""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 pinned for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels detects the framework through inheritance, so a subclass works too
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
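# --- Hedged usage sketch (not part of the original test file) ---
# ContextManagers enters the given context managers in order and exits them
# in reverse, which is exactly what the stdout assertions above verify.
def _sketch_context_managers():
    with ContextManagers([context_fr(), context_en()]):
        print("Transformers are awesome!")
    # prints: Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!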
| 78 |
"""Tests for the PyTorch Nezha model."""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ):
__lowercase = NezhaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,):
__lowercase = True
__lowercase = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,)
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
__lowercase = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ):
__lowercase = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
__lowercase = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = self.num_choices
__lowercase = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : List[str] = True
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ):
__lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = NezhaModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
# This regression test was failing with PyTorch < 1.3
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = NezhaModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=lowercase__ )
__lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
__lowercase = torch.jit.trace(
lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) )
__lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ )
loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
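# --- Hedged usage sketch (not part of the original test file) ---
# Mirrors the slow integration test above: run the pretrained Nezha checkpoint
# on a toy batch and inspect the hidden-state shape.
def _sketch_nezha_inference():
    model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
    input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
    attention_mask = torch.ones_like(input_ids)
    with torch.no_grad():
        hidden_states = model(input_ids, attention_mask=attention_mask)[0]
    print(hidden_states.shape)  # torch.Size([1, 6, 768])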
| 41 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
SCREAMING_SNAKE_CASE__ : str = 25_60_47
SCREAMING_SNAKE_CASE__ : Optional[int] = 25_61_45
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ):
__lowerCamelCase = NllbTokenizer
__lowerCamelCase = NllbTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = {}
def __UpperCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Any = NllbTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = NllbTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase__ : str = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : str = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : str = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Dict = tempfile.mkdtemp()
UpperCAmelCase__ : int = tokenizer_r.save_pretrained(_lowerCAmelCase )
UpperCAmelCase__ : Dict = tokenizer_p.save_pretrained(_lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : Tuple = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : str = tokenizer_r.from_pretrained(_lowerCAmelCase )
UpperCAmelCase__ : Tuple = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : List[str] = tempfile.mkdtemp()
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase )
UpperCAmelCase__ : int = tokenizer_p.save_pretrained(_lowerCAmelCase )
                # Checks it saves with the same files
self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : int = tokenizer_r.from_pretrained(_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase__ : List[str] = tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = tokenizer_p.save_pretrained(_lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : int = tokenizer_r.from_pretrained(_lowerCAmelCase )
UpperCAmelCase__ : str = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
@require_torch
def __UpperCAmelCase ( self ):
if not self.test_seqaseq:
return
UpperCAmelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
UpperCAmelCase__ : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
UpperCAmelCase__ : Optional[int] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
UpperCAmelCase__ : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCAmelCase , tgt_texts=_lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
UpperCAmelCase__ : Optional[Any] = tokenizer.prepare_seqaseq_batch(
_lowerCAmelCase , tgt_texts=_lowerCAmelCase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
UpperCAmelCase__ : Tuple = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , _lowerCAmelCase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : int = [AddedToken("""<special>""" , lstrip=_lowerCAmelCase )]
UpperCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : str = tokenizer_r.encode("""Hey this is a <special> token""" )
UpperCAmelCase__ : Optional[Any] = tokenizer_r.encode("""<special>""" , add_special_tokens=_lowerCAmelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
UpperCAmelCase__ : Any = self.tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.encode("""Hey this is a <special> token""" )
UpperCAmelCase__ : Dict = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
__lowerCamelCase = 'facebook/nllb-200-distilled-600M'
__lowerCamelCase = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
__lowerCamelCase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
__lowerCamelCase = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
@classmethod
def __UpperCAmelCase ( cls ):
UpperCAmelCase__ : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
UpperCAmelCase__ : Union[str, Any] = 1
return cls
def __UpperCAmelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 256057 )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )
def __UpperCAmelCase ( self ):
self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids )
# fmt: off
UpperCAmelCase__ : Union[str, Any] = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
UpperCAmelCase__ : Optional[Any] = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[Any] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , _lowerCAmelCase )
UpperCAmelCase__ : Any = 10
UpperCAmelCase__ : Dict = self.tokenizer(_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
def __UpperCAmelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [256203, 3] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Dict = tempfile.mkdtemp()
UpperCAmelCase__ : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase__ : List[str] = NllbTokenizer.from_pretrained(_lowerCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCAmelCase )
@require_torch
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase__ : Optional[int] = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
UpperCAmelCase__ : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = self.tokenizer(self.src_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase__ : List[Any] = targets["""input_ids"""]
UpperCAmelCase__ : Dict = shift_tokens_right(
_lowerCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[256047, 70, 7356, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 256057,
} , )
@require_torch
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Tuple = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Tuple = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
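# --- Hedged usage sketch (not part of the original test file) ---
# Mirrors the integration tests above: tokenize with explicit source/target
# language codes on the real checkpoint.
def _sketch_nllb_tokenization():
    tokenizer = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
    )
    batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
    # the language code and EOS token placement depends on the legacy behaviour flag
    print(batch.input_ids)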
| 79 |
"""Hash map with open addressing."""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 41 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
__snake_case :str = ViTImageProcessor if is_vision_available() else None
@property
def _a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = (3, 32, 128)
__lowercase = tempfile.mkdtemp()
# fmt: off
__lowercase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
__lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
__lowercase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
__lowercase = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self : List[Any] , **_lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self : Union[str, Any] , **_lowerCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 80 |
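# A hedged end-to-end sketch for the MGP-STR processor exercised by the tests above;
# the checkpoint id "alibaba-damo/mgp-str-base" and the image path are illustrative
# assumptions, not taken from the test file:
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

pixel_values = processor(images=Image.open("text_crop.png"), return_tensors="pt").pixel_values
outputs = model(pixel_values)
# batch_decode fuses the char/bpe/wp prediction heads, as the last test above checks
print(processor.batch_decode(outputs.logits)["generated_text"])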
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """
    Zero-shot object detection pipeline: predicts bounding boxes on an image for a set of
    free-form candidate labels.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a [xmin, ymin, xmax, ymax] tensor into a dict with those keys."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 41 | 0 |
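# A hedged usage sketch for the pipeline above; the OWL-ViT checkpoint and image URL
# are illustrative, not taken from this file:
from transformers import pipeline

detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
    threshold=0.1,
)
print(predictions)  # list of {"score", "label", "box"} dicts, sorted by score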
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 81 |
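# get_dummy_inputs above seeds torch differently on Apple MPS because torch.Generator
# objects cannot be placed on that device. A minimal sketch of the same pattern as a
# reusable helper (the helper name is ours, the pattern is taken from the test):
import torch

def make_generator(device, seed: int = 0):
    if str(device).startswith("mps"):
        # fall back to seeding the global RNG on MPS
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)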
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """
    Zero-shot text classification tool built on an NLI (MNLI entailment) model.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 41 | 0 |
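# The tool above reduces zero-shot classification to NLI entailment scoring over
# "This example is {label}" hypotheses. A hedged sketch of the equivalent high-level
# pipeline API, reusing the tool's default checkpoint (the input text is illustrative):
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
print(classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"]))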
"""simple docstring"""
def join(separator: str, separated: list) -> str:
    """
    Joins a list of strings with the given separator.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 82 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """A generic heap; acts as a max-heap on the scores produced by the key function."""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 41 | 0 |
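# A short usage sketch for the Heap above. With the default identity key the larger
# score wins (max-heap); passing key=lambda x: -x turns it into a min-heap. The item
# ids and values here are illustrative:
heap = Heap()
heap.insert_item(5, 34)
heap.insert_item(6, 31)
heap.insert_item(7, 37)
print(heap.get_top())      # [7, 37]
heap.update_item(5, 40)    # raise item 5's value; it bubbles to the top
print(heap.extract_top())  # [5, 40]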
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    For each element, scan all subsequent elements for the next greater one; O(n^2).
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same O(n^2) idea using enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Monotonic-stack solution: traverse from the right, popping stack entries that are
    <= the current element; the remaining top is the next greater element. O(n).
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 83 |
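# Quick sanity check for the three implementations above on a tiny input (expected
# values worked out by hand: the next greater element of 2 and 1 is 5; 5 has none):
assert next_greatest_element([2, 1, 5]) == [5, 5, -1]
assert next_greatest_element_fast([2, 1, 5]) == next_greatest_element_slow([2, 1, 5])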
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 41 | 0 |
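# A minimal custom-callback sketch mirroring the event-recording pattern above.
# PrintLossCallback is our own illustrative class; the TrainerCallback hook names are
# the real transformers API exercised by the tests:
from transformers import TrainerCallback

class PrintLossCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# hooked up via: Trainer(..., callbacks=[PrintLossCallback()])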
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 84 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """
    Output of [`FlaxControlNetModel`]: the per-resolution down-block residuals and the mid-block residual.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray:
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 41 | 0 |
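# A minimal initialization sketch for the Flax ControlNet above; the shapes come from
# init_weights() and the RNG seed is arbitrary. In practice pretrained weights are
# usually loaded via FlaxControlNetModel.from_pretrained(...) instead:
import jax

controlnet = FlaxControlNetModel()
params = controlnet.init_weights(rng=jax.random.PRNGKey(0))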
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 85 |
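# Example invocation of the conversion script above (the script filename and all
# paths are placeholders, not taken from this file):
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/tf/weights \
#       --pytorch_dump_folder_path /path/to/output \
#       --openai_config_file /path/to/config.json   # optional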
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """
    Extract non-overlapping patches from an image tensor; returns a tensor of shape
    (1, rows, columns, patch_height * patch_width * channels).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """
    Image processor that turns images into flattened patch sequences for Pix2Struct.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear",
            align_corners=False, antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
| 41 | 0 |
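# A hedged end-to-end sketch of how the image processor above is typically used via
# Pix2StructProcessor; the checkpoint id, image path, and question are illustrative:
from PIL import Image
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-docvqa-base")
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-docvqa-base")

inputs = processor(images=Image.open("doc.png"), text="What is the title?", return_tensors="pt")
print(processor.batch_decode(model.generate(**inputs), skip_special_tokens=True))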
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
| 86 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Stores two signals and computes their circular convolution by the matrix method.
    """

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is a cyclic rotation of the second signal
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 41 | 0 |
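# Quick usage sketch for the class above with its default signals; the expected
# result [10, 10, 6, 14] can be verified by hand from the circular-convolution sum:
convolver = CircularConvolution()
print(convolver.circular_convolution())  # [10, 10, 6, 14]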
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a fairseq OPT checkpoint and rewrite its state dict into HF layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 87 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines: `frames` holds the generated video frames.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__ ( A_ ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Tuple:
super().__init__()
# make sure scheduler can always be converted to DDIM
_lowerCamelCase : List[Any] = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE)
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE):
_lowerCamelCase : int = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_lowerCamelCase : List[str] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and len(SCREAMING_SNAKE_CASE) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE)}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.')
_lowerCamelCase : int = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype)
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
_lowerCamelCase : int = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the paper and should be in [0, 1]
# do x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE).prev_sample
_lowerCamelCase : List[str] = (image / 2 + 0.5).clamp(0 , 1)
_lowerCamelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_lowerCamelCase : Tuple = self.numpy_to_pil(SCREAMING_SNAKE_CASE)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE)
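# A minimal usage sketch (illustrative; building `unet` and `scheduler` is omitted,
# and the call values are assumptions rather than anything this file prescribes):
#
#   pipe = lowercase__(unet, scheduler)
#   images = pipe(1).images  # batch_size=1; the remaining arguments keep their defaults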
| 88 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ = getLogger(__name__)
lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ):
"""simple docstring"""
__lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' )
__lowercase = str(A__ )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
__lowercase = model.half()
__lowercase = AutoTokenizer.from_pretrained(A__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowercase = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
__lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
__lowercase = [prefix + text for text in examples_chunk]
__lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
__lowercase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
__lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowercase = int(time.time() - start_time ) # seconds
__lowercase = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
        '''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowercase , __lowercase = parser.parse_known_args()
__lowercase = parse_numeric_n_bool_cl_kwargs(A__ )
if parsed_args and verbose:
print(F"parsed the following generate kwargs: {parsed_args}" )
__lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowercase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=A__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowercase = generate_summaries_or_translations(
A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , )
if args.reference_path is None:
return {}
# Compute scores
__lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowercase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )]
__lowercase = score_fn(A__ , A__ )
scores.update(A__ )
if args.dump_args:
scores.update(A__ )
if args.info:
__lowercase = args.info
if verbose:
print(A__ )
if args.score_path is not None:
json.dump(A__ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
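    # Usage for summarization (paths are illustrative; all flags exist in the parser above):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 8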
run_generate(verbose=True)
| 41 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=False ) -> str:
_lowercase : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowercase : str = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
_lowercase : List[Any] = ''
else:
_lowercase : Dict = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase : Any = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
_lowercase : str = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : int = in_proj_weight[
: config.hidden_size, :
]
_lowercase : str = in_proj_bias[: config.hidden_size]
_lowercase : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
_lowercase : Tuple = in_proj_bias[-config.hidden_size :]
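# Hand-checked note (illustrative): for the "s16" checkpoint configured below,
# hidden_size is 384, so each fused `attn.qkv.weight` has shape (3 * 384, 384);
# rows [0:384] become the query projection, [384:768] the key, and [768:1152] the value.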
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : Dict = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
    # The projection head is used in MSN's self-supervised pre-training;
    # it is not needed for downstream tasks.
_lowercase : List[str] = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Union[str, Any] = dct.pop(lowerCamelCase_ )
_lowercase : str = val
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Any = ViTMSNConfig()
_lowercase : List[str] = 1000
_lowercase : List[Any] = 'datasets/huggingface/label-files'
_lowercase : Dict = 'imagenet-1k-id2label.json'
_lowercase : Optional[Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ ) , 'r' ) )
_lowercase : Tuple = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : Optional[Any] = idalabel
_lowercase : Tuple = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
_lowercase : Any = 384
_lowercase : List[Any] = 1536
_lowercase : Dict = 6
elif "l16" in checkpoint_url:
_lowercase : List[str] = 1024
_lowercase : Optional[int] = 4096
_lowercase : Any = 24
_lowercase : List[str] = 16
_lowercase : Optional[int] = 0.1
elif "b4" in checkpoint_url:
_lowercase : int = 4
elif "l7" in checkpoint_url:
_lowercase : List[Any] = 7
_lowercase : Union[str, Any] = 1024
_lowercase : Union[str, Any] = 4096
_lowercase : str = 24
_lowercase : Any = 16
_lowercase : Any = 0.1
_lowercase : Any = ViTMSNModel(lowerCamelCase_ )
_lowercase : Any = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['target_encoder']
_lowercase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCamelCase_ )
_lowercase : Dict = create_rename_keys(lowerCamelCase_ , base_model=lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_ , base_model=lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
_lowercase : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
    _lowercase : List[Any] = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
_lowercase : Dict = image_processor(images=lowerCamelCase_ , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
_lowercase : Tuple = model(**lowerCamelCase_ )
_lowercase : Optional[int] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_lowercase : int = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
_lowercase : Optional[Any] = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
elif "l16" in checkpoint_url:
_lowercase : Union[str, Any] = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
elif "b4" in checkpoint_url:
_lowercase : List[Any] = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
_lowercase : Optional[Any] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCamelCase_ , atol=1e-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 89 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(A__ ):
print(F"{i}\t\t{d}" )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = [float('''inf''' )] * vertex_count
__lowercase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__lowercase = distance[u] + w
__lowercase = check_negative_cycle(A__ , A__ , A__ )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip())
lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip())
lowerCAmelCase__ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowerCAmelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip())
lowerCAmelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
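# A hand-checked example (illustrative): with V = 3, E = 2 and the edges
#   {"src": 0, "dst": 1, "weight": 2} and {"src": 1, "dst": 2, "weight": 3},
# bellman_ford(graph, 3, 2, 0) returns [0.0, 2.0, 5.0] and raises nothing,
# since this graph has no negative cycle.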
| 41 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A ) -> str:
lowerCAmelCase__ = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCAmelCase__ = [144, 192, 240]
lowerCAmelCase__ = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCAmelCase__ = [96, 120, 144]
lowerCAmelCase__ = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCAmelCase__ = [64, 80, 96]
lowerCAmelCase__ = [16, 16, 24, 48, 64, 80, 320]
lowerCAmelCase__ = 0.05
lowerCAmelCase__ = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCAmelCase__ = 512
lowerCAmelCase__ = 16
lowerCAmelCase__ = 21
lowerCAmelCase__ = '''pascal-voc-id2label.json'''
else:
lowerCAmelCase__ = 1000
lowerCAmelCase__ = '''imagenet-1k-id2label.json'''
lowerCAmelCase__ = '''huggingface/label-files'''
lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
return config
def _snake_case ( A , A=False ) -> str:
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCAmelCase__ = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCAmelCase__ = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCAmelCase__ = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCAmelCase__ = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCAmelCase__ = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCAmelCase__ = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCAmelCase__ = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCAmelCase__ = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCAmelCase__ = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCAmelCase__ = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCAmelCase__ = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCAmelCase__ = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCAmelCase__ = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCAmelCase__ = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCAmelCase__ = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCAmelCase__ = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCAmelCase__ = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCAmelCase__ = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCAmelCase__ = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCAmelCase__ = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCAmelCase__ = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCAmelCase__ = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCAmelCase__ = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCAmelCase__ = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCAmelCase__ = '''mobilevit.''' + name
return name
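# Worked example for the rename helper above (traced by hand):
#   "conv_1.conv.weight"
#     -> "conv_stem.conv.weight"                    ("conv_1." -> "conv_stem.")
#     -> "conv_stem.convolution.weight"             (".conv." -> ".convolution.")
#     -> "mobilevit.conv_stem.convolution.weight"   (prefix added for non-base, non-seg-head keys)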
def _snake_case ( A , A , A=False ) -> Tuple:
if base_model:
lowerCAmelCase__ = ''''''
else:
lowerCAmelCase__ = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(A )
if key[:8] == "encoder.":
lowerCAmelCase__ = key[8:]
if "qkv" in key:
lowerCAmelCase__ = key.split('''.''' )
lowerCAmelCase__ = int(key_split[0][6:] ) - 1
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowerCAmelCase__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCAmelCase__ = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = val
return orig_state_dict
def _snake_case ( ) -> Dict:
lowerCAmelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase__ = Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def _snake_case ( A , A , A , A=False ) -> int:
lowerCAmelCase__ = get_mobilevit_config(A )
# load original state_dict
lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCAmelCase__ = MobileViTForSemanticSegmentation(A ).eval()
else:
lowerCAmelCase__ = MobileViTForImageClassification(A ).eval()
lowerCAmelCase__ = convert_state_dict(A , A )
model.load_state_dict(A )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCAmelCase__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCAmelCase__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCAmelCase__ = model(**A )
lowerCAmelCase__ = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCAmelCase__ = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCAmelCase__ = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCAmelCase__ = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowerCAmelCase__ = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowerCAmelCase__ = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowerCAmelCase__ = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , A , atol=1E-4 )
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A )
if push_to_hub:
lowerCAmelCase__ = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
lowerCAmelCase__ = model_mapping[mobilevit_name]
image_processor.push_to_hub(A , organization='''apple''' )
model.push_to_hub(A , organization='''apple''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__UpperCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 90 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : int ):
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' ,lowercase__ ,)
super().__init__(*lowercase__ ,**lowercase__ )
| 41 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : int = 1000 ):
A = 2**power
A = 0
while n:
A , A = r + n % 10, n // 10
return r
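# Worked check: solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.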
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 91 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = 0.0_1
with locka.acquire():
with pytest.raises(A__ ):
__lowercase = time.time()
locka.acquire(A__ )
assert time.time() - _start > timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = '''a''' * 1000 + '''.lock'''
__lowercase = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(A__ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
__lowercase = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(A__ ):
locka.acquire(0 )
| 41 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCamelCase_ = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Path , UpperCAmelCase__ : Union[str, None] = None , UpperCAmelCase__ : Union[List[str], None] = None , UpperCAmelCase__ : Union[str, List[str], None] = None , UpperCAmelCase__ : bool = True , ):
'''simple docstring'''
lowercase : List[Any] =[file for file in os.listdir(UpperCAmelCase__ ) if os.path.isfile(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )]
if identifier is not None:
lowercase : List[str] =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
for n_ in n_identifier:
lowercase : Tuple =[file for file in files if n_ not in file]
else:
lowercase : int =[file for file in files if n_identifier not in file]
lowercase : Optional[Any] =ignore_files or []
ignore_files.append('''__init__.py''' )
lowercase : List[str] =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , UpperCAmelCase__ )
if only_modules:
lowercase : Any =file.split('''.''' )[0]
try:
lowercase : Optional[Any] =getattr(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =doctest.DocTestSuite(UpperCAmelCase__ )
lowercase : str =unittest.TextTestRunner().run(UpperCAmelCase__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
lowercase : Optional[int] =doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : str =Path('''src/transformers''' )
lowercase : Optional[Any] ='''modeling'''
lowercase : Dict =[
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(UpperCAmelCase__ , identifier=UpperCAmelCase__ , ignore_files=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : int =Path('''src/transformers''' )
lowercase : List[str] ='''tokenization'''
self.analyze_directory(UpperCAmelCase__ , identifier=UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =Path('''src/transformers''' )
lowercase : Any ='''configuration'''
self.analyze_directory(UpperCAmelCase__ , identifier=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : str =Path('''src/transformers''' )
lowercase : List[str] =['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(UpperCAmelCase__ , n_identifier=UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any =Path('''docs/source''' )
lowercase : List[Any] =['''favicon.ico''']
self.analyze_directory(UpperCAmelCase__ , ignore_files=UpperCAmelCase__ , only_modules=UpperCAmelCase__ )
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = 5_0 , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__UpperCAmelCase , )
lowerCAmelCase__ :Tuple = image.to(self.device )
# set step values
self.scheduler.set_timesteps(__UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase__ :Union[str, Any] = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the paper and should be in [0, 1]
# do x_t -> x_t-1
lowerCAmelCase__ :int = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
lowerCAmelCase__ :int = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase__ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ :Union[str, Any] = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=__UpperCAmelCase ), "This is a local test"
| 93 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase__ = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(R'''\[([^\]]+)\]''')
def _A ( A__ ):
"""simple docstring"""
__lowercase = _re_indent.search(A__ )
return "" if search is None else search.groups()[0]
def _A ( A__ , A__="" , A__=None , A__=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(A__ ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def _A ( A__ ):
"""simple docstring"""
def _inner(A__ ):
return key(A__ ).lower().replace('''_''' , '''''' )
return _inner
def _A ( A__ , A__=None ):
"""simple docstring"""
def noop(A__ ):
return x
if key is None:
__lowercase = noop
# Constants are all uppercase, they go first.
__lowercase = [obj for obj in objects if key(A__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__lowercase = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()]
# Functions begin with a lowercase, they go last.
__lowercase = [obj for obj in objects if not key(A__ )[0].isupper()]
__lowercase = ignore_underscore(A__ )
return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ )
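# Worked example (hand-checked) for the sorter above:
#   sort_objects(["load", "Config", "VERSION", "_helper"]) -> ["VERSION", "Config", "_helper", "load"]
# Constants sort first, classes second, functions last; within each group names are
# compared case-insensitively with underscores stripped.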
def _A ( A__ ):
"""simple docstring"""
def _replace(A__ ):
__lowercase = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]"
__lowercase = import_statement.split('''\n''' )
if len(A__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__lowercase = 2 if lines[1].strip() == '''[''' else 1
__lowercase = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__lowercase = sort_objects(A__ , key=lambda A__ : x[1] )
__lowercase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(A__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__lowercase = _re_bracket_content.sub(_replace , lines[1] )
else:
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
__lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(A__ )] )
return "\n".join(A__ )
else:
# Finally we have to deal with imports fitting on one line
__lowercase = _re_bracket_content.sub(_replace , A__ )
return import_statement
def _A ( A__ , A__=True ):
"""simple docstring"""
with open(A__ , '''r''' ) as f:
__lowercase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__lowercase = split_code_in_indented_blocks(
A__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(A__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__lowercase = main_blocks[block_idx]
__lowercase = block.split('''\n''' )
# Get to the start of the imports.
__lowercase = 0
while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__lowercase = len(A__ )
else:
line_idx += 1
if line_idx >= len(A__ ):
continue
# Ignore beginning and last line: they don't contain anything.
__lowercase = '''\n'''.join(block_lines[line_idx:-1] )
__lowercase = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
__lowercase = split_code_in_indented_blocks(A__ , indent_level=A__ )
# We have two categories of import key: list or _import_structure[key].append/extend
__lowercase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__lowercase = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__lowercase = [(i, key) for i, key in enumerate(A__ ) if key is not None]
__lowercase = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__lowercase = 0
__lowercase = []
for i in range(len(A__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(A__ )
count += 1
# And we put our main block back together with its first and last line.
__lowercase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(A__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(A__ , '''w''' ) as f:
f.write('''\n'''.join(A__ ) )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = []
for root, _, files in os.walk(A__ ):
if "__init__.py" in files:
__lowercase = sort_imports(os.path.join(A__ , '''__init__.py''' ) , check_only=A__ )
if result:
__lowercase = [os.path.join(A__ , '''__init__.py''' )]
if len(A__ ) > 0:
raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 41 | 0 |
'''simple docstring'''
def lowercase_ ( __A : int ) -> bool:
"""simple docstring"""
return number & 1 == 0
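# The lowest binary bit is 0 exactly for even numbers: 4 & 1 == 0 (even), 5 & 1 == 1 (odd).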
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
__lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,)
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
__lowercase = CLIPTextModel(lowercase__ )
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ):
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = TextToVideoSDPipeline(**lowercase__ )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ )
__lowercase = '''np'''
__lowercase = sd_pipe(**lowercase__ ).frames
__lowercase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 41 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''vit'''
def __init__( self : int , lowerCAmelCase_ : Any=768 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : Optional[int]=12 , lowerCAmelCase_ : List[str]=3_072 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Optional[int]=0.0_2 , lowerCAmelCase_ : Optional[Any]=1e-12 , lowerCAmelCase_ : Any=224 , lowerCAmelCase_ : str=16 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=16 , **lowerCAmelCase_ : Union[str, Any] , ) -> Dict:
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : Optional[int] = qkv_bias
UpperCAmelCase_ : Tuple = encoder_stride
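# With the defaults above (image_size=224, patch_size=16), an input image produces
# (224 // 16) ** 2 = 196 patches; with the prepended [CLS] token the sequence length is 197.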
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> float:
return 1e-4
| 95 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _A ( A__ ):
"""simple docstring"""
__lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _A ( A__ ):
"""simple docstring"""
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(A__ , A__ , bias=A__ )
__lowercase = emb.weight.data
return lin_layer
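# Note (illustrative): this builds the LM head by reusing the embedding's weight data,
# so output logits are produced by the same matrix that embeds the input tokens.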
def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ):
"""simple docstring"""
__lowercase = torch.load(A__ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A__ )
__lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ )
if mbart_aa and finetuned:
__lowercase = '''relu'''
__lowercase = state_dict['''decoder.embed_tokens.weight''']
__lowercase = MBartForConditionalGeneration(A__ )
model.model.load_state_dict(A__ )
if finetuned:
__lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 41 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = field(default="automatic-speech-recognition" ,metadata={"include_in_asdict_even_if_is_default": True} )
UpperCAmelCase__ = Features({"audio": Audio()} )
UpperCAmelCase__ = Features({"transcription": Value("string" )} )
UpperCAmelCase__ = "audio"
UpperCAmelCase__ = "transcription"
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Union[str, Any] ) -> Any:
if self.audio_column not in features:
raise ValueError(F'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , __snake_case ):
raise ValueError(F'Column {self.audio_column} is not an Audio type.' )
__magic_name__: List[str] = copy.deepcopy(self )
__magic_name__: Tuple = self.input_schema.copy()
__magic_name__: Dict = features[self.audio_column]
__magic_name__: List[str] = input_schema
return task_template
@property
def lowerCamelCase__ ( self : List[str] ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 96 |
'''simple docstring'''
import os
from math import logaa
def _A ( A__ = "base_exp.txt" ):
"""simple docstring"""
__lowercase = 0
__lowercase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
__lowercase , __lowercase = list(map(A__ , line.split(''',''' ) ) )
if x * logaa(A__ ) > largest:
__lowercase = x * logaa(A__ )
__lowercase = i + 1
return result
if __name__ == "__main__":
print(solution())
| 41 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def a ( snake_case__: List[Any] , snake_case__: Optional[Any] ):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowercase_ = flax_key_tuple[:-1] + ('''weight''',)
lowercase_ = torch.permute(snake_case__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ):
# linear layer
lowercase_ = flax_key_tuple[:-1] + ('''weight''',)
lowercase_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowercase_ = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
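# The flattened t5x checkpoint mixes three kinds of entries (layer names below are
# illustrative): "<prefix>/metadata/..." and "<prefix>/kvstore/..." are regrouped
# under "<prefix>" with a tuple sub-key, while a plain "<prefix>/<leaf>" keeps its
# leaf name, so that unflatten_dict() can later rebuild a tensorstore spec per layer.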
def rename_and_save_block(current_block, save_path):
    """simple docstring"""
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
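# Note on the naming scheme above: each block is first saved with a "-of-???.bin"
# placeholder suffix and only renamed to "-of-{total:05d}.bin" once the final
# number of shards is known, since the count cannot be determined up front.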
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 97 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )
| 41 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    """simple docstring"""

    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 98 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """simple docstring"""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
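# Worked example: chinese_remainder_theorem(5, 1, 7, 3) == 31,
# since 31 % 5 == 1 and 31 % 7 == 3, and 31 is the unique such residue modulo 35.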
| 41 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    """simple docstring"""
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    """simple docstring"""
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    """simple docstring"""
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    """simple docstring"""
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    """simple docstring"""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    """simple docstring"""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
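# Taken together, these tests pin down the patch_submodule contract: patching
# "os.path.join" must cover every access path inside the target module (os.path.join,
# path.join, a bare join, and renamed imports), stack with other patches in any
# order, tolerate attributes that do not exist, and restore the originals on exit.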
| 41 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Any = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_A : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
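# The _LazyModule indirection keeps importing this package cheap: the heavy
# torch/tf/flax submodules are only imported when one of the exported names
# is actually accessed.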
| 100 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 41 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
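# Example run: iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) returns [1, 2, 5, 7, 7, 8, 9].
# This bottom-up variant merges runs of width 2, 4, 8, ... instead of recursing.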
| 101 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    """simple docstring"""

    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """simple docstring"""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """simple docstring"""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 41 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        """simple docstring"""
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object, {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }, )
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object, {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    }, )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
    def test_small_model_tf(self):
'''simple docstring'''
pass
@require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
] , )
        outputs = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
],
[
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        """simple docstring"""
        model_id = "facebook/detr-resnet-50"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
] , )
        outputs = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        """simple docstring"""
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
] , )
        outputs = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        ],
    )
@require_torch
@slow
def test_threshold(self):
    threshold = 0.9985
    model_id = "facebook/detr-resnet-50"
    object_detector = pipeline("object-detection", model=model_id)
    outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
def test_layoutlm(self):
    model_id = "Narsil/layoutlmv3-finetuned-funsd"
    threshold = 0.9993
    object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
    outputs = object_detector(
        "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
    )
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
{"""score""": 0.99_93, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}},
{"""score""": 0.99_93, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}},
] , )
| 102 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """
    Zero-shot object detection pipeline: predicts bounding boxes for objects in an image given a set of
    `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
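# Minimal usage sketch (added for illustration; not part of the original module).
# It assumes network access and the "google/owlvit-base-patch32" checkpoint, a
# standard zero-shot object-detection model; treat both as assumptions here.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    for prediction in predictions:
        print(prediction["label"], prediction["score"], prediction["box"])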
| 41 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"
# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch (keys follow TF-slim checkpoint naming)."""
    tf_to_pt_map = {}
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
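# Quick worked check of the "SAME" padding arithmetic above (illustrative, not
# part of the original file): a 3x3 kernel with stride 2 on a 224x224 input
# needs one extra row and column, split as (0, 1) per axis, giving 225x225.
if __name__ == "__main__":
    _conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
    _padded = apply_tf_padding(torch.zeros(1, 3, 224, 224), _conv)
    assert _padded.shape == (1, 3, 225, 225)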
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[Union[bool, str]] = True,
    ) -> None:
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        self.convolution = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros")
        if use_normalization:
            self.normalization = nn.BatchNorm2d(num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True)
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2)
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            # Depthwise 3x3 convolution followed by a pointwise 1x1 convolution.
            self.layer.append(MobileNetVaConvLayer(config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels))
            self.layer.append(MobileNetVaConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1))
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states)
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
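# Usage sketch (illustrative, not part of the original module). It assumes the
# checkpoint above is downloadable, that `AutoImageProcessor` resolves a
# matching processor, and "cats.png" is a placeholder image path.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
    logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])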
| 103 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length"
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
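# Hypothetical invocation sketch (added for illustration): the label strings
# are arbitrary examples, and calling the tool downloads the default checkpoint.
if __name__ == "__main__":
    classifier = TextClassificationTool()
    print(classifier("This is a super nice API!", labels=["positive", "negative"]))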
| 41 | 0 |
"""simple docstring"""
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: outputs 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 104 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """
    A generic heap of (item, value) pairs. `key` maps an item's value to its
    priority; with the default comparison the largest value sits on top.
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
    """Entry point for doctest-based checks of the Heap class."""
if __name__ == "__main__":
import doctest
doctest.testmod()
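# Usage sketch (illustrative, not from the original file): with the default
# comparison the largest value rises to the top.
if __name__ == "__main__":
    heap = Heap()
    heap.insert_item(5, 34)
    heap.insert_item(6, 31)
    heap.insert_item(7, 37)
    print(heap.get_top())      # [7, 37]
    heap.update_item(6, 40)
    print(heap.extract_top())  # [6, 40]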
| 41 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
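# Illustration (added, not part of the original file): after
# `traverse_util.flatten_dict` and '/'.join below, T5X parameters are addressed
# by flat string keys such as (example names):
#   encoder/layers_0/attention/key/kernel
#   encoder/layers_0/pre_attention_layer_norm/scale
#   decoder/layers_3/mlp/wo/kernel
# which is exactly what the three lookup helpers above index into.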
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = traverse_util.flatten_dict(variables['target'] )
SCREAMING_SNAKE_CASE_ : str = {'/'.join(lowerCamelCase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
SCREAMING_SNAKE_CASE_ : Tuple = 'encoder/layers_0/mlp/wi_0/kernel' in old
print('Split MLP:' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = collections.OrderedDict()
# Shared embeddings.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = old['token_embedder/embedding']
# Encoder.
for i in range(lowerCamelCase_ ):
# Block i, layer 0 (Self Attention).
SCREAMING_SNAKE_CASE_ : Optional[Any] = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , 'pre_attention_layer_norm' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = tax_attention_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , 'attention' )
SCREAMING_SNAKE_CASE_ : List[str] = layer_norm
SCREAMING_SNAKE_CASE_ : List[Any] = k.T
SCREAMING_SNAKE_CASE_ : Any = o.T
SCREAMING_SNAKE_CASE_ : List[str] = q.T
SCREAMING_SNAKE_CASE_ : List[Any] = v.T
# Block i, layer 1 (MLP).
SCREAMING_SNAKE_CASE_ : Dict = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , 'pre_mlp_layer_norm' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = tax_mlp_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE_ : Optional[Any] = wi[0].T
SCREAMING_SNAKE_CASE_ : str = wi[1].T
else:
SCREAMING_SNAKE_CASE_ : List[str] = wi.T
SCREAMING_SNAKE_CASE_ : Tuple = wo.T
SCREAMING_SNAKE_CASE_ : str = old[
'encoder/relpos_bias/rel_embedding'
].T
SCREAMING_SNAKE_CASE_ : Any = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(lowerCamelCase_ ):
# Block i, layer 0 (Self Attention).
SCREAMING_SNAKE_CASE_ : str = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'pre_self_attention_layer_norm' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = tax_attention_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'self_attention' )
SCREAMING_SNAKE_CASE_ : int = layer_norm
SCREAMING_SNAKE_CASE_ : Any = k.T
SCREAMING_SNAKE_CASE_ : Tuple = o.T
SCREAMING_SNAKE_CASE_ : Optional[Any] = q.T
SCREAMING_SNAKE_CASE_ : Any = v.T
# Block i, layer 1 (Cross Attention).
SCREAMING_SNAKE_CASE_ : Dict = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'pre_cross_attention_layer_norm' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = tax_attention_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'encoder_decoder_attention' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm
SCREAMING_SNAKE_CASE_ : str = k.T
SCREAMING_SNAKE_CASE_ : List[str] = o.T
SCREAMING_SNAKE_CASE_ : Union[str, Any] = q.T
SCREAMING_SNAKE_CASE_ : List[str] = v.T
# Block i, layer 2 (MLP).
SCREAMING_SNAKE_CASE_ : str = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'pre_mlp_layer_norm' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = tax_mlp_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE_ : Optional[int] = wi[0].T
SCREAMING_SNAKE_CASE_ : Dict = wi[1].T
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = wi.T
SCREAMING_SNAKE_CASE_ : Any = wo.T
SCREAMING_SNAKE_CASE_ : Tuple = old['decoder/decoder_norm/scale']
SCREAMING_SNAKE_CASE_ : Optional[int] = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
SCREAMING_SNAKE_CASE_ : Any = old['decoder/logits_dense/kernel'].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
UpperCamelCase__ : Dict = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
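# Example invocation sketch (added; the paths below are placeholders, not from
# the original file):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model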
| 105 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=lowercase__ )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(lowercase__ ) in warn_mock.call_args[0][0]
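# Minimal sketch of the pattern these tests exercise (illustrative, not part of
# the original test file): override any subset of `on_*` hooks and pass the
# callback class or instance to `Trainer(..., callbacks=[...])`.
class PrintEpochCallback(TrainerCallback):
    def on_epoch_end(self, args, state, control, **kwargs):
        print(f"finished epoch {state.epoch}")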
| 41 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """
    Jacobi Iteration Method: an iterative algorithm to determine the solutions of a
    strictly diagonally dominant system of linear equations.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raises if the given augmented matrix is not strictly diagonally dominant."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
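# Worked example (added for illustration): a strictly diagonally dominant
# 3x3 system solved with 25 Jacobi iterations.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0, 0], 25))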
| 106 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. contronet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
| 41 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """
    A Bezier curve is a weighted sum of a set of control points.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
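
    # Quick sanity check (a sketch, assuming the BezierCurve class above): for the
    # quadratic curve with control points (0, 0), (5, 5), (5, 0), the Bernstein
    # basis at t = 0.5 is [0.25, 0.5, 0.25], so the curve passes through (3.75, 2.5).
    quadratic = BezierCurve([(0, 0), (5, 5), (5, 0)])
    print(quadratic.basis_function(0.5))         # [0.25, 0.5, 0.25]
    print(quadratic.bezier_curve_function(0.5))  # (3.75, 2.5)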
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height x patch_width) patches from a (C, H, W) tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)
    )
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
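

# Shape check for the helper above (a sketch; the toy tensor is made up for
# illustration): a (C, H, W) image with 2x2 patches yields a (1, H/2, W/2, C*2*2)
# grid of flattened patches.
#
#     toy = torch.arange(3 * 4 * 4, dtype=torch.float32).reshape(3, 4, 4)
#     torch_extract_patches(toy, 2, 2).shape  # torch.Size([1, 2, 2, 12])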
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
):
    """Render `text` onto a fresh RGB image, with padding around the text."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header: str, **kwargs):
    """Render a text header above `image` and return the combined image as a numpy array."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor that turns images into flattened patch sequences for Pix2Struct."""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Standardize an image with its own mean and (floored) standard deviation."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
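

# The per-image standardization in `normalize` guards against division by a tiny
# standard deviation with max(std, 1/sqrt(num_elements)), in the spirit of
# tf.image.per_image_standardization. A minimal illustration (toy constant image):
#
#     image = np.zeros((16, 16, 3), dtype=np.float32)  # std == 0
#     max(image.std(), 1.0 / math.sqrt(image.size))    # ~0.036, keeps the result finite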
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """Page-replacement cache backed by a deque (recency order) and a set (membership)."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
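
    # Step-by-step trace of the demo above (capacity 4, most recent key on the left):
    #   refer('A') -> ['A']
    #   refer(2)   -> [2, 'A']
    #   refer(3)   -> [3, 2, 'A']
    #   refer('A') -> ['A', 3, 2]      cache hit: 'A' is moved to the front
    #   refer(4)   -> [4, 'A', 3, 2]
    #   refer(5)   -> [5, 4, 'A', 3]   cache full: least recently used key 2 is evicted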
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Circular convolution of two 1-D signals via the circulant-matrix method."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
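
    # Worked example (assuming the class above): each output sample is the dot product
    # of the first signal with a circular shift of the second, e.g.
    # y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10, giving [10.0, 10.0, 6.0, 14.0] overall.
    print(CircularConvolution().circular_convolution())  # [10.0, 10.0, 6.0, 14.0]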
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 0 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # initialize path with -1, plus one extra slot for returning to the start
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
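

# Quick check on a 5-vertex example graph (adjacency matrix chosen for illustration);
# the returned cycle starts and ends at vertex 0:
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]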
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """Parse CLI args, generate outputs, and (optionally) score them against references."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
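

# Typical invocation (flag names follow TensorFlowBenchmarkArguments; treat the exact
# flags and values as illustrative, not authoritative):
#
#     python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128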
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
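
    # Non-interactive sanity check (edge list chosen for illustration): with edges
    # 0->1 (w=2), 1->2 (w=3) and 0->2 (w=10), distances from vertex 0 are [0.0, 2.0, 5.0]:
    #
    #     edges = [
    #         {"src": 0, "dst": 1, "weight": 2},
    #         {"src": 1, "dst": 2, "weight": 3},
    #         {"src": 0, "dst": 2, "weight": 10},
    #     ]
    #     bellman_ford(edges, 3, 3, 0)  # [0.0, 2.0, 5.0]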
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """Configuration class for the MVP model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
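

# Smoke test (a sketch; the overridden values are chosen arbitrarily):
#
#     config = MvpConfig(encoder_layers=2, decoder_layers=2, use_prompt=True)
#     (config.model_type, config.d_model, config.prompt_length)  # ('mvp', 1024, 100)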
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n        >>> import torch\n\n        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n        >>> pipe_prior.to(\"cuda\")\n\n        >>> prompt = \"red cat, 4k photo\"\n        >>> out = pipe_prior(prompt)\n        >>> image_emb = out.image_embeds\n        >>> negative_image_emb = out.negative_image_embeds\n\n        >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n        >>> pipe.to(\"cuda\")\n\n        >>> image = pipe(\n        ...     prompt,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=negative_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ... ).images\n\n        >>> image[0].save(\"cat.png\")\n        ```\n"
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
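

# The helper returns latent-space dimensions (pixels divided by the movq scale
# factor), rounded up so the decoded image covers the requested size. For example:
#
#     get_new_h_w(768, 768)  # (96, 96): 768 is already divisible by 8**2
#     get_new_h_w(700, 500)  # (88, 64): rounded up to the next multiple of 8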
class KandinskyPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation with Kandinsky 2.1."""

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via accelerate, significantly reducing memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload models to CPU via accelerate hooks, keeping one model on GPU at a time."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
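

# The idiom under test is the standard advisory-lock pattern (a sketch, assuming
# the same FileLock class):
#
#     with FileLock("work.lock"):
#         ...  # only one process at a time runs this block; others block or time out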
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place queens row by row, backtracking on any collision."""
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the indent (leading whitespace) of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given `indent_level`, between `start_prompt` and `end_prompt`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so it lowercases its result and ignores underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
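

# Example of the ordering rule (a sketch, assuming sort_objects above): constants
# first, then classes, then functions, each group sorted case-insensitively while
# ignoring underscores.
#
#     sort_objects(["load_model", "CONFIG_NAME", "AutoModel", "cached_path"])
#     # ['CONFIG_NAME', 'AutoModel', 'cached_path', 'load_model']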
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_DIFFUSERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]

    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _snake_case ( self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase : Optional[Any] = XGLMTokenizer(lowercase__ , keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = """<pad>"""
__lowerCamelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def _snake_case ( self : int ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 1_0_0_8 )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _snake_case ( self : int ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
def _snake_case ( self : List[str] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
        text = """Hello World!"""
        expected_ids = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
        self.assertListEqual(expected_ids , self.big_tokenizer.encode(text ) )
@slow
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : Dict = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
__lowerCamelCase : Any = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@slow
def _snake_case ( self : List[Any] ):
'''simple docstring'''
        # fmt: off
        __lowerCamelCase : Any = {
"""input_ids""": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""facebook/xglm-564M""" , padding=lowercase__ , )
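# --- Added illustration (not from the test file above) ---
# The assertions above shift raw SentencePiece ids by `tokenizer.fairseq_offset`
# because the HF tokenizer reserves the lowest ids for special tokens. The
# helper name and the offset value here are illustrative, not XGLM's actual API.
def spm_to_hf_ids(spm_ids, fairseq_offset=1):
    return [i + fairseq_offset for i in spm_ids]
assert spm_to_hf_ids([285, 46, 10], fairseq_offset=1) == [286, 47, 11]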
| 519 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
__lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,)
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
__lowercase = CLIPTextModel(lowercase__ )
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
  def SCREAMING_SNAKE_CASE ( self : List[str] ,device : int ,seed : List[str]=0 ):
    if str(device ).startswith('''mps''' ):
      __lowercase = torch.manual_seed(seed )
    else:
      __lowercase = torch.Generator(device=device ).manual_seed(seed )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = TextToVideoSDPipeline(**lowercase__ )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ )
__lowercase = '''np'''
__lowercase = sd_pipe(**lowercase__ ).frames
__lowercase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
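# --- Added illustration ---
# A standalone sketch of the "expected slice" pattern the pipeline tests above
# rely on: compare a tiny corner of the output against hard-coded values
# instead of the full tensor. The numbers here are made up.
import numpy as np
frame = np.full((64, 64, 3), 128.0)
image_slice = frame[-3:, -3:, -1]  # 3x3 corner of the last channel
expected_slice = np.full(9, 128.0)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2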
| 41 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key , default=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1 # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env( key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value
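# --- Added usage example (illustrative values; the env variable names are examples only) ---
os.environ['''MY_LIB_DEBUG'''] = '''1'''
assert parse_flag_from_env('''MY_LIB_DEBUG''' ) is True
assert get_int_from_env(['''UNSET_A''', '''UNSET_B'''] , default=4 ) == 4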
| 571 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_aa=False ):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_aa and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_aa''', action='''store_true''', help='''whether the model is a mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
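# --- Added sanity check (illustrative, runs outside the CLI path) ---
# make_linear_from_emb above ties the output projection to the embedding
# weights, so the resulting layer computes hidden_states @ emb.weight.T:
_emb = nn.Embedding(10, 4)
_lin = make_linear_from_emb(_emb)
_h = torch.randn(2, 4)
assert torch.allclose(_lin(_h), _h @ _emb.weight.t())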
| 41 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__SCREAMING_SNAKE_CASE ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE =direct_transformers_import(PATH_TO_TRANSFORMERS)
__SCREAMING_SNAKE_CASE =transformers.models.auto.configuration_auto.CONFIG_MAPPING
__SCREAMING_SNAKE_CASE ={
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"config.{attribute}" in modeling_source
or F"getattr(config, \"{attribute}\"" in modeling_source
or F"getattr(self.config, \"{attribute}\"" in modeling_source
):
SCREAMING_SNAKE_CASE_ = True
# Deal with multi-line cases
elif (
re.search(
RF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , A__ , )
is not None
):
SCREAMING_SNAKE_CASE_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
SCREAMING_SNAKE_CASE_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
SCREAMING_SNAKE_CASE_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
SCREAMING_SNAKE_CASE_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
SCREAMING_SNAKE_CASE_ = True
if not attribute_used:
SCREAMING_SNAKE_CASE_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
SCREAMING_SNAKE_CASE_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
SCREAMING_SNAKE_CASE_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
SCREAMING_SNAKE_CASE_ = True
elif attribute.endswith('''_token_id''' ):
SCREAMING_SNAKE_CASE_ = True
# configuration class specific cases
if not case_allowed:
SCREAMING_SNAKE_CASE_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
SCREAMING_SNAKE_CASE_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def a (_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = dict(inspect.signature(config_class.__init__ ).parameters )
SCREAMING_SNAKE_CASE_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
SCREAMING_SNAKE_CASE_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
SCREAMING_SNAKE_CASE_ = {}
if len(config_class.attribute_map ) > 0:
SCREAMING_SNAKE_CASE_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
SCREAMING_SNAKE_CASE_ = inspect.getsourcefile(A__ )
SCREAMING_SNAKE_CASE_ = os.path.dirname(A__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
SCREAMING_SNAKE_CASE_ = [os.path.join(A__ , A__ ) for fn in os.listdir(A__ ) if fn.startswith('''modeling_''' )]
# Get the source code strings
SCREAMING_SNAKE_CASE_ = []
for path in modeling_paths:
if os.path.isfile(A__ ):
with open(A__ ) as fp:
modeling_sources.append(fp.read() )
SCREAMING_SNAKE_CASE_ = []
for config_param, default_value in zip(A__ , A__ ):
# `attributes` here is all the variant names for `config_param`
SCREAMING_SNAKE_CASE_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(A__ , A__ , A__ , A__ ):
unused_attributes.append(attributes[0] )
return sorted(A__ )
def a ():
SCREAMING_SNAKE_CASE_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
SCREAMING_SNAKE_CASE_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _lowerCAmelCase : inspect.isclass(A__ )
and issubclass(A__ , A__ )
and inspect.getmodule(A__ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
SCREAMING_SNAKE_CASE_ = check_config_attributes_being_used(A__ )
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE_ = unused_attributes
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"{name}: {attributes}\n"
raise ValueError(A__ )
if __name__ == "__main__":
check_config_attributes()
| 234 |
'''simple docstring'''
import os
from math import log10
def solution(data_file="base_exp.txt" ):
    """simple docstring"""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(''',''' ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
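# --- Added illustration ---
# The key trick in solution() above: compare a**x values through x * log10(a)
# so the huge powers never need to be computed.
a1, x1 = 2, 11   # 2**11 = 2048
a2, x2 = 3, 7    # 3**7  = 2187
assert (x1 * log10(a1) < x2 * log10(a2)) == (a1**x1 < a2**x2)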
| 41 | 0 |
'''simple docstring'''
def __UpperCamelCase ( discount_rate , cash_flows ):
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''' )
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
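# --- Added worked example (illustrative numbers) ---
# Cash flows of [-1000, 500, 500, 500] at a 10% discount rate:
# NPV = -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 ≈ 243.43
assert abs(__UpperCamelCase(0.10 , [-1000, 500, 500, 500] ) - 243.43) < 1e-9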
| 152 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small'
SCREAMING_SNAKE_CASE : int = ['past_key_values']
SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,):
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = use_cache
__lowercase = encoder_layers
__lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase = {0: '''batch'''}
__lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super().outputs
else:
__lowercase = super(lowercase__ ,self ).outputs
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# Generate decoder inputs
__lowercase = seq_length if not self.use_past else 1
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase = dict(**lowercase__ ,**lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
__lowercase = common_inputs['''decoder_input_ids'''].shape[1]
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = decoder_seq_length + 3
__lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 )
__lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase = self.num_layers
__lowercase = min(lowercase__ ,lowercase__ )
__lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers
__lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
__lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ ,lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase , __lowercase = self.num_layers
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = common_inputs['''attention_mask'''].dtype
__lowercase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 )
__lowercase = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = tokenizer.num_special_tokens_to_add(lowercase__ )
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
elif self.task == "causal-lm":
__lowercase = self._generate_dummy_inputs_for_causal_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
else:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
__lowercase = super(lowercase__ ,self )._flatten_past_key_values_(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
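# --- Added illustration (standalone sketch; the sizes are examples) ---
# Shape logic behind the dummy past_key_values built above: each decoder layer
# caches one key and one value tensor of shape
# (batch, num_heads, past_sequence_length, hidden_size // num_heads).
import torch
_batch, _heads, _past_len, _head_dim = 2, 16, 5, 32
_shape = (_batch, _heads, _past_len, _head_dim)
_past_key_values = [(torch.zeros(_shape), torch.zeros(_shape)) for _ in range(8)]
assert _past_key_values[0][0].shape == torch.Size(_shape)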
| 41 | 0 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__A = False
__A = logging.get_logger(__name__)
__A = "ybelkada/fonts"
def SCREAMING_SNAKE_CASE__ ( ) -> str:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
lowercase__: List[Any] = image_tensor.unsqueeze(0 )
lowercase__: Optional[int] = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
lowercase__: Dict = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
lowercase__: Tuple = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase = 3_6 , __UpperCAmelCase = "black" , __UpperCAmelCase = "white" , __UpperCAmelCase = 5 , __UpperCAmelCase = 5 , __UpperCAmelCase = 5 , __UpperCAmelCase = 5 , __UpperCAmelCase = None , __UpperCAmelCase = None , ) -> Any:
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
lowercase__: List[str] = textwrap.TextWrapper(width=8_0 )
lowercase__: Any = wrapper.wrap(text=A__ )
lowercase__: int = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
lowercase__: List[str] = io.BytesIO(A__ )
elif font_path is not None:
lowercase__: Optional[int] = font_path
else:
lowercase__: Optional[Any] = hf_hub_download(A__ , '''Arial.TTF''' )
lowercase__: Union[str, Any] = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
lowercase__: Union[str, Any] = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
lowercase__, lowercase__, lowercase__, lowercase__: str = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
lowercase__: str = text_width + left_padding + right_padding
lowercase__: str = text_height + top_padding + bottom_padding
lowercase__: Dict = Image.new('''RGB''' , (image_width, image_height) , A__ )
lowercase__: Optional[Any] = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
lowercase__: Any = to_pil_image(A__ )
lowercase__: Optional[int] = render_text(A__ , **A__ )
lowercase__: Tuple = max(header_image.width , image.width )
lowercase__: Union[str, Any] = int(image.height * (new_width / image.width) )
lowercase__: Tuple = int(header_image.height * (new_width / header_image.width) )
lowercase__: List[Any] = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
lowercase__: Optional[int] = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
lowercase__: int = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class UpperCAmelCase (lowerCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase :List[str] = ['flattened_patches']
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 2048 , _UpperCAmelCase = False , **_UpperCAmelCase , ):
super().__init__(**lowercase__ )
lowercase__: Dict = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
lowercase__: List[str] = do_normalize
lowercase__: Tuple = do_convert_rgb
lowercase__: Dict = max_patches
lowercase__: Optional[int] = is_vqa
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self.extract_flattened_patches , '''torch''' )
_check_torch_version()
# convert to torch
lowercase__: str = to_channel_dimension_format(lowercase__ , ChannelDimension.FIRST )
lowercase__: Dict = torch.from_numpy(lowercase__ )
lowercase__, lowercase__: Optional[int] = patch_size['''height'''], patch_size['''width''']
lowercase__, lowercase__: List[str] = get_image_size(lowercase__ )
# maximize scale s.t.
lowercase__: Dict = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowercase__: Union[str, Any] = max(min(math.floor(scale * image_height / patch_height ) , lowercase__ ) , 1 )
lowercase__: str = max(min(math.floor(scale * image_width / patch_width ) , lowercase__ ) , 1 )
lowercase__: Optional[int] = max(num_feasible_rows * patch_height , 1 )
lowercase__: str = max(num_feasible_cols * patch_width , 1 )
lowercase__: Optional[Any] = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=lowercase__ , antialias=lowercase__ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowercase__: Dict = torch_extract_patches(lowercase__ , lowercase__ , lowercase__ )
lowercase__: Tuple = patches.shape
lowercase__: int = patches_shape[1]
lowercase__: str = patches_shape[2]
lowercase__: Dict = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowercase__: Union[str, Any] = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowercase__: Union[str, Any] = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 , lowercase__ ).reshape([rows * columns, 1] )
lowercase__: Optional[int] = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowercase__: Tuple = row_ids.to(torch.floataa )
lowercase__: str = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowercase__: str = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowercase__: Dict = torch.nn.functional.pad(lowercase__ , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowercase__: Any = to_numpy_array(lowercase__ )
return result
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase ):
if image.dtype == np.uinta:
lowercase__: int = image.astype(np.floataa )
# take mean across the whole `image`
lowercase__: Optional[Any] = np.mean(lowercase__ )
lowercase__: Union[str, Any] = np.std(lowercase__ )
lowercase__: Union[str, Any] = max(lowercase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , **lowercase__ )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
lowercase__: List[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__: Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__: str = patch_size if patch_size is not None else self.patch_size
lowercase__: Dict = max_patches if max_patches is not None else self.max_patches
lowercase__: Union[str, Any] = self.is_vqa
if kwargs.get('''data_format''' , lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
lowercase__: Optional[int] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__: Union[str, Any] = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
lowercase__: int = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
lowercase__: Dict = kwargs.pop('''font_bytes''' , lowercase__ )
lowercase__: Union[str, Any] = kwargs.pop('''font_path''' , lowercase__ )
if isinstance(lowercase__ , lowercase__ ):
lowercase__: Tuple = [header_text] * len(lowercase__ )
lowercase__: Any = [
render_header(lowercase__ , header_text[i] , font_bytes=lowercase__ , font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
lowercase__: Any = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
lowercase__: Union[str, Any] = [
self.extract_flattened_patches(image=lowercase__ , max_patches=lowercase__ , patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
lowercase__: Optional[Any] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowercase__: Union[str, Any] = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=lowercase__ )
return encoded_outputs
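# --- Added illustration ---
# Minimal sketch of the unfold trick used by torch_extract_patches above:
# carve an image tensor into non-overlapping (patch_h, patch_w) patches.
import torch
_image = torch.arange(1 * 3 * 4 * 4, dtype=torch.float32).reshape(1, 3, 4, 4)
_patches = torch.nn.functional.unfold(_image, (2, 2), stride=(2, 2))
# one column per patch, each holding 3 * 2 * 2 = 12 values
assert _patches.shape == (1, 12, 4)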
| 586 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a , b ):
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x , y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1 , r1 , n2 , r2 ):
    """simple docstring"""
    (x , y) = extended_euclid(n1 , n2 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a , n ):
    """simple docstring"""
    (b , x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1 , r1 , n2 , r2 ):
    """simple docstring"""
    x , y = invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
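# --- Added worked example ---
# chinese_remainder_theorem(5, 1, 7, 3) finds n with n % 5 == 1 and n % 7 == 3;
# 31 = 6 * 5 + 1 = 4 * 7 + 3 is the smallest such n.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31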
| 41 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timesformer'''] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
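# --- Added illustration (standalone sketch, not transformers' real classes) ---
# The gating pattern above in miniature: an optional submodule is only
# registered in the import structure when its dependency imports cleanly.
_demo_import_structure = {"configuration": ["Config"]}
try:
    import torch  # noqa: F401
    _demo_import_structure["modeling"] = ["Model"]
except ImportError:
    pass
assert "configuration" in _demo_import_structure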
| 338 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
pass
def _A ( ):
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
pass
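# --- Added sketch (illustrative stand-in, not datasets' patch_submodule) ---
# The start()/stop() contract exercised above, reduced to a tiny patcher:
import types
class _TinyPatch:
    def __init__(self, obj, attr, value):
        self.obj, self.attr, self.value = obj, attr, value
    def start(self):
        self.original = getattr(self.obj, self.attr)
        setattr(self.obj, self.attr, self.value)
    def stop(self):
        setattr(self.obj, self.attr, self.original)
_m = types.SimpleNamespace(open=open)
_p = _TinyPatch(_m, '''open''', '''__mock__''')
_p.start()
assert _m.open == '''__mock__'''
_p.stop()
assert _m.open is open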
| 41 | 0 |
from __future__ import annotations
def fractional_knapsack(value , weight , capacity ):
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value = 0
    fractions = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
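# --- Added worked example ---
# Classic instance: values [60, 100, 120], weights [10, 20, 30], capacity 50.
# The greedy-by-ratio optimum takes items 0 and 1 whole and 2/3 of item 2.
_max_value, _fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert _max_value == 240.0 and _fractions == [1, 1, 2 / 3]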
| 351 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
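
# A minimal inference sketch (assumptions: the "sijunhe/nezha-cn-base" checkpoint
# used by the integration tests above, and that it pairs with a BERT-style
# tokenizer as its model card suggests):
#
#     from transformers import BertTokenizer, NezhaModel
#
#     tokenizer = BertTokenizer.from_pretrained("sijunhe/nezha-cn-base")
#     model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
#     inputs = tokenizer("我爱北京天安门", return_tensors="pt")
#     last_hidden_state = model(**inputs).last_hidden_state  # (1, seq_len, 768)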
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
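
# Usage sketch (assumptions: the script is run standalone, e.g. from a scheduled
# CI job, and the file name below is hypothetical; GITHUB_TOKEN must hold a
# token with repo scope, matching the environment variable read in main()):
#
#     GITHUB_TOKEN=ghp_... python close_stale_issues.py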
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to store (key, val) at the given bucket index; returns False if
        the bucket is occupied by a different key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation by concatenating turns."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """Sub-word tokenizer for Japanese with special handling for URLs, e-mails,
    phone numbers, dates, prices, emoji, and box-drawing/block characters."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def A__ ( self :Optional[Any] , __snake_case :int , __snake_case :List[Any]="\n" ):
'''simple docstring'''
__magic_name__ : Dict =[]
__magic_name__ : str =[]
__magic_name__ : Any =self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowercase__ ) > 0:
words.append(bytearray(lowercase__ ).decode("""utf-8""" , errors="""replace""" ) )
__magic_name__ : Any =[]
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(lowercase__ )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(lowercase__ )
if len(lowercase__ ) > 0:
words.append(bytearray(lowercase__ ).decode("""utf-8""" , errors="""replace""" ) )
__magic_name__ : List[Any] ="""""".join(lowercase__ )
return text
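
# Minimal usage sketch (the checkpoint name comes from the pretrained maps at
# the top of this file):
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("吾輩は猫である", return_tensors="pt").input_ids
#     text = tokenizer.decode(ids[0])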
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Zero-shot object detection pipeline using models such as OwlViT."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict with those keys."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
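
# Minimal usage sketch (assumptions: an OwlViT checkpoint such as
# "google/owlvit-base-patch32" and the local image file name, both illustrative):
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detections = detector("street.jpg", candidate_labels=["car", "bicycle", "person"])
#     # -> [{"score": ..., "label": "car", "box": {"xmin": ..., "ymin": ..., ...}}, ...]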
import torch
from diffusers import StableDiffusionPipeline
UpperCAmelCase ="path-to-your-trained-model"
UpperCAmelCase =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
UpperCAmelCase ="A photo of sks dog in a bucket"
UpperCAmelCase =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
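
# Minimal usage sketch (assumption: the default "facebook/bart-large-mnli"
# checkpoint is downloaded on first use):
#
#     classifier = TextClassificationTool()
#     label = classifier("This is a super nice API!", labels=["positive", "negative"])
#     # -> "positive"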
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """Returns x unchanged if it is already iterable, else the pair (x, x)."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
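
# Minimal usage sketch (the tiny checkpoints are the ones referenced by the
# tests above; any compatible vision/text pair works the same way):
#
#     model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
#         "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
#     )
#     outputs = model(input_ids=input_ids, pixel_values=pixel_values)
#     text_embeds, image_embeds = outputs.text_embeds, outputs.image_embeds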
from collections.abc import Callable
class Heap:
    """A max-heap keyed by `key`, supporting update and deletion of arbitrary items."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for a swap in both arr and pos_map."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be the parent, among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        """Updates the value of the given item in the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        """Deletes the given item from the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns the top pair [item, score] from the heap, if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """Returns and removes the top pair [item, score] from the heap, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
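
# Minimal usage sketch of this max-heap (items keyed by their own value):
#
#     h = Heap()
#     h.insert_item(5, 34)
#     h.insert_item(6, 31)
#     h.insert_item(7, 37)
#     assert h.get_top() == [7, 37]
#     assert h.extract_top() == [7, 37]
#     assert h.extract_top() == [5, 34]
#     assert h.extract_top() == [6, 31]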
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual question answering pipeline: answers open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports batched input: a list (or generator/dataset) of
            # {"image": ..., "question": ...} dicts.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 519 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback, MyTestTrainerCallback])
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
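
# --- Hedged usage sketch (not part of the original test file) ---
# The event-recording pattern exercised above works the same way outside of
# tests: any TrainerCallback subclass passed via `callbacks=` receives every
# training event. `model` and `args` below are illustrative placeholders.
#
#   from transformers import Trainer, TrainerCallback
#
#   class LogOnSave(TrainerCallback):
#       def on_save(self, args, state, control, **kwargs):
#           print(f"checkpoint saved at step {state.global_step}")
#
#   trainer = Trainer(model=model, args=args, callbacks=[LogOnSave()])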
| 41 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str=7 , _lowerCamelCase : List[str]=4_0_0 , _lowerCamelCase : Optional[Any]=2_0_0_0 , _lowerCamelCase : int=1 , _lowerCamelCase : Optional[Any]=0.0 , _lowerCamelCase : Tuple=1_6_0_0_0 , _lowerCamelCase : str=True , _lowerCamelCase : Optional[int]=8_0 , _lowerCamelCase : List[str]=1_6 , _lowerCamelCase : Optional[Any]=6_4 , _lowerCamelCase : Dict="hann_window" , _lowerCamelCase : Tuple=8_0 , _lowerCamelCase : Tuple=7_6_0_0 , _lowerCamelCase : int=1E-10 , _lowerCamelCase : Dict=True , ):
A__ = parent
A__ = batch_size
A__ = min_seq_length
A__ = max_seq_length
A__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ = feature_size
A__ = padding_value
A__ = sampling_rate
A__ = do_normalize
A__ = num_mel_bins
A__ = hop_length
A__ = win_length
A__ = win_function
A__ = fmin
A__ = fmax
A__ = mel_floor
A__ = return_attention_mask
def A__ ( self : Tuple ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def A__ ( self : List[str] , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : Any=False ):
def _flatten(_lowerCamelCase : List[str] ):
return list(itertools.chain(*lowercase__ ) )
if equal_length:
A__ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ = [np.asarray(lowercase__ ) for x in speech_inputs]
return speech_inputs
def A__ ( self : Tuple , _lowerCamelCase : Dict=False , _lowerCamelCase : Optional[Any]=False ):
if equal_length:
A__ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ = [np.asarray(lowercase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] =SpeechTaFeatureExtractor
def A__ ( self : List[Any] ):
A__ = SpeechTaFeatureExtractionTester(self )
def A__ ( self : List[Any] , _lowerCamelCase : int ):
self.assertTrue(np.all(np.mean(lowercase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase__ , axis=0 ) - 1 ) < 1E-3 ) )
def A__ ( self : Dict ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A__ = [np.asarray(lowercase__ ) for speech_input in speech_inputs]
# Test not batched input
A__ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
A__ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
# Test batched
A__ = feat_extract(lowercase__ , return_tensors='''np''' ).input_values
A__ = feat_extract(lowercase__ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase__ , lowercase__ ):
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
def A__ ( self : Dict ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A__ = ['''longest''', '''max_length''', '''do_not_pad''']
A__ = [None, 1_6_0_0, None]
for max_length, padding in zip(lowercase__ , lowercase__ ):
A__ = feat_extract(lowercase__ , padding=lowercase__ , max_length=lowercase__ , return_tensors='''np''' )
A__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def A__ ( self : Tuple ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = range(8_0_0 , 1_4_0_0 , 2_0_0 )
A__ = [floats_list((1, x) )[0] for x in lengths]
A__ = ['''longest''', '''max_length''', '''do_not_pad''']
A__ = [None, 1_6_0_0, None]
for max_length, padding in zip(lowercase__ , lowercase__ ):
A__ = feat_extract(lowercase__ , max_length=lowercase__ , padding=lowercase__ )
A__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def A__ ( self : Optional[Any] ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A__ = feat_extract(
lowercase__ , truncation=lowercase__ , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
A__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def A__ ( self : int ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A__ = feat_extract(
lowercase__ , truncation=lowercase__ , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
A__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
A__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A__ = feat_extract(
lowercase__ , truncation=lowercase__ , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
A__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def A__ ( self : Optional[Any] ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = np.random.rand(1_0_0 ).astype(np.floataa )
A__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def A__ ( self : Tuple ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A__ = [np.asarray(lowercase__ ) for speech_input in speech_inputs]
# Test feature size
A__ = feature_extractor(audio_target=lowercase__ , padding=lowercase__ , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
A__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
A__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
# Test batched
A__ = feature_extractor(lowercase__ , return_tensors='''np''' ).input_values
A__ = feature_extractor(lowercase__ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase__ , lowercase__ ):
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
A__ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
A__ = np.asarray(lowercase__ )
A__ = feature_extractor(lowercase__ , return_tensors='''np''' ).input_values
A__ = feature_extractor(lowercase__ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase__ , lowercase__ ):
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
def A__ ( self : str ):
A__ = self.feat_extract_tester.prepare_inputs_for_target()
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowercase__ ) == len(lowercase__ ) for x, y in zip(lowercase__ , processed_features[input_name] ) ) )
A__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowercase__ )
A__ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
A__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
A__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def A__ ( self : Tuple ):
A__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowercase__ )
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
A__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
A__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def A__ ( self : List[Any] ):
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = self.feat_extract_tester.prepare_inputs_for_target()
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
A__ = feat_extract.pad(lowercase__ , padding='''longest''' , return_tensors='''np''' )[input_name]
A__ = feat_extract.pad(lowercase__ , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def A__ ( self : Optional[Any] ):
A__ = self.feat_extract_dict
A__ = True
A__ = self.feature_extraction_class(**lowercase__ )
A__ = self.feat_extract_tester.prepare_inputs_for_target()
A__ = [len(lowercase__ ) for x in speech_inputs]
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
A__ = feat_extract.pad(lowercase__ , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , lowercase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowercase__ )
def A__ ( self : Union[str, Any] ):
A__ = self.feat_extract_dict
A__ = True
A__ = self.feature_extraction_class(**lowercase__ )
A__ = self.feat_extract_tester.prepare_inputs_for_target()
A__ = [len(lowercase__ ) for x in speech_inputs]
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
A__ = min(lowercase__ )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
A__ = feat_extract.pad(
lowercase__ , padding='''max_length''' , max_length=lowercase__ , truncation=lowercase__ , return_tensors='''np''' )
self.assertIn('''attention_mask''' , lowercase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def A__ ( self : str , _lowerCamelCase : str ):
from datasets import load_dataset
A__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
A__ = ds.sort('''id''' ).select(range(lowercase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def A__ ( self : Optional[Any] ):
# fmt: off
A__ = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
A__ = self._load_datasamples(1 )
A__ = SpeechTaFeatureExtractor()
A__ = feature_extractor(lowercase__ , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , lowercase__ , atol=1E-6 ) )
def A__ ( self : int ):
# fmt: off
A__ = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
A__ = self._load_datasamples(1 )
A__ = SpeechTaFeatureExtractor()
A__ = feature_extractor(audio_target=lowercase__ , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , lowercase__ , atol=1E-4 ) )
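
# --- Hedged usage sketch (not part of the original test file) ---
# The two call paths exercised above, side by side: plain waveforms produce
# 1-D `input_values`, while `audio_target=` produces log-mel features with
# `num_mel_bins` channels. Shapes are illustrative.
#
#   extractor = SpeechTaFeatureExtractor()
#   waveform = floats_list((1, 16000))[0]   # reuses the helper defined above
#   inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#   inputs.input_values.shape               # -> (1, 16000)
#   targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="pt")
#   targets.input_values.shape              # -> (1, num_frames, 80)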
| 571 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of the Flax ControlNet: residuals for the UNet down and mid blocks."""

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
        # 5. controlnet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
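
# --- Hedged usage sketch (not part of the original module) ---
# Wiring for the module above, assuming it is diffusers' FlaxControlNetModel
# (the class name in this dump is mangled); argument names follow __call__:
#
#   import jax
#
#   controlnet = FlaxControlNetModel(sample_size=32)        # name assumed
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
#   down_res, mid_res = controlnet.apply(
#       {"params": params},
#       sample, timesteps, encoder_hidden_states, controlnet_cond,
#       return_dict=False,
#   )
#   # down_res / mid_res are the residuals a UNet consumes in its down/mid blocks.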
| 41 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __magic_name__ ( lowerCamelCase__):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 234 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    """Raises if the installed torch is too old for Pix2StructImageProcessor."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """
    Extracts non-overlapping patches from a (channels, height, width) image
    tensor and returns a (1, rows, columns, channels * patch_height * patch_width)
    tensor of flattened patches.
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)
    )
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
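
# Hedged shape check for torch_extract_patches above (illustrative values, not
# part of the original file): a (3, 32, 32) image with 16x16 patches becomes a
# (1, 2, 2, 768) tensor, i.e. a 2x2 grid of flattened 16*16*3 patches.
#
#   image = torch.randn(3, 32, 32)
#   patches = torch_extract_patches(image, 16, 16)
#   patches.shape  # -> torch.Size([1, 2, 2, 768])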
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Renders `text` on a fresh image, wrapped at 80 characters per line."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Renders the string `header` above `image` and returns the merged image."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
# maximize scale s.t.
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are flattened patches, not images.''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
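
# --- Hedged usage sketch (not part of the original module) ---
# End-to-end flow through the processor above (the class name in this dump is
# mangled; Pix2StructImageProcessor is assumed). With the default 16x16 patch
# size, each flattened patch carries 2 position ids plus 16 * 16 * 3 pixels:
#
#   from PIL import Image
#
#   processor = Pix2StructImageProcessor()
#   image = Image.new("RGB", (640, 480), "white")
#   encoding = processor(image, max_patches=1024, return_tensors="pt")
#   encoding.flattened_patches.shape   # -> (1, 1024, 770)
#   encoding.attention_mask.shape      # -> (1, 1024)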
| 41 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def _lowerCAmelCase( self ) -> Any:
torch.manual_seed(0 )
lowercase__ : int = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
lowercase__ : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , )
torch.manual_seed(0 )
lowercase__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
lowercase__ : List[str] = CLIPTextModel(lowercase__ )
lowercase__ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Union[str, Any]:
if str(lowercase__ ).startswith('''mps''' ):
lowercase__ : Optional[int] = torch.manual_seed(lowercase__ )
else:
lowercase__ : Optional[Any] = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
lowercase__ : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ : Union[str, Any] = self.get_dummy_components()
lowercase__ : Dict = TextToVideoSDPipeline(**lowercase__ )
lowercase__ : Optional[Any] = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
lowercase__ : List[str] = self.get_dummy_inputs(lowercase__ )
lowercase__ : Union[str, Any] = '''np'''
lowercase__ : Dict = sd_pipe(**lowercase__ ).frames
lowercase__ : Optional[Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowercase__ : Optional[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCAmelCase( self ) -> Tuple:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _lowerCAmelCase( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ , expected_max_diff=1E-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def _lowerCAmelCase( self ) -> List[Any]:
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def _lowerCAmelCase( self ) -> Dict:
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def _lowerCAmelCase( self ) -> Any:
pass
def _lowerCAmelCase( self ) -> int:
return super().test_progress_bar()
@slow
@skip_mps
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> str:
lowercase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
lowercase__ : Any = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
lowercase__ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase__ : Dict = pipe.to('''cuda''' )
lowercase__ : Optional[Any] = '''Spiderman is surfing'''
lowercase__ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ : int = pipe(lowercase__ , generator=lowercase__ , num_inference_steps=25 , output_type='''pt''' ).frames
lowercase__ : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
lowercase__ : str = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
lowercase__ : str = pipe.to('''cuda''' )
lowercase__ : Dict = '''Spiderman is surfing'''
lowercase__ : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ : Union[str, Any] = pipe(lowercase__ , generator=lowercase__ , num_inference_steps=2 , output_type='''pt''' ).frames
lowercase__ : Any = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
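
# --- Hedged usage sketch (not part of the original test file) ---
# The slow test above distilled into a minimal generation script; names and
# the checkpoint mirror the test code, the step count is illustrative:
#
#   import torch
#   from diffusers import TextToVideoSDPipeline
#
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
#   pipe = pipe.to("cuda")
#   generator = torch.Generator(device="cpu").manual_seed(0)
#   frames = pipe(
#       "Spiderman is surfing", generator=generator,
#       num_inference_steps=25, output_type="pt",
#   ).frames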
| 152 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Computes the circular convolution of two discrete signals by building the
    circulant matrix of the second signal and multiplying it with the first.
    """

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated right by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
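
# Worked example for the default signals above (not part of the original
# file): with x = [2, 1, 2, -1] and h = [1, 2, 3, 4], the rotated rows form
# the circulant matrix of h, and y[n] = sum_k x[k] * h[(n - k) mod 4]:
#
#   y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10
#   y[1] = 2*2 + 1*1 + 2*4 + (-1)*3 = 10
#   y[2] = 2*3 + 1*2 + 2*1 + (-1)*4 = 6
#   y[3] = 2*4 + 1*3 + 2*2 + (-1)*1 = 14
#
#   >>> CircularConvolution().circular_convolution()
#   [10.0, 10.0, 6.0, 14.0]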
| 41 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
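# A minimal determinism sketch (standalone; it mirrors the dummy components
# above rather than any test in this file): reseeding the generator
# identically must reproduce the exact same image.
def check_ddim_determinism():
    unet = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
    first = pipe(generator=torch.manual_seed(0), num_inference_steps=2, output_type="numpy").images
    second = pipe(generator=torch.manual_seed(0), num_inference_steps=2, output_type="numpy").images
    assert np.abs(first - second).max() < 1e-6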
| 586 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines.

    Args:
        frames (`List[np.ndarray]` or `torch.FloatTensor`):
            List of denoised frames (essentially images) as NumPy arrays or as a torch tensor.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 0 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the datasets-cli.
        """
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 338 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41 | 0 |
def kth_permutation(k, n):
    """
    Finds the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1
    in O(n^2) time, using the factorial number system.
    """
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
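    # Example (kth_permutation as defined above): the permutation with index
    # k=10 of 0, 1, 2, 3 is [1, 3, 0, 2].
    print(kth_permutation(10, 4))  # [1, 3, 0, 2]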
| 351 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """
    Returns the shortest paths from src to all other vertices, raising an
    Exception if the graph contains a negative-weight cycle.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
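# A minimal non-interactive sketch (the example graph is illustrative) of the
# bellman_ford function above:
#
#     edges = [
#         {"src": 0, "dst": 1, "weight": 2},
#         {"src": 1, "dst": 2, "weight": 3},
#         {"src": 0, "dst": 2, "weight": 10},
#     ]
#     bellman_ford(edges, vertex_count=3, edge_count=3, src=0)  # [0.0, 2.0, 5.0]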
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
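# A quick construction sketch (defaults as restored above): `attribute_map`
# aliases the generic config names onto the encoder-specific ones.
#
#     config = BlenderbotSmallConfig()
#     assert config.hidden_size == config.d_model == 512
#     assert config.num_attention_heads == config.encoder_attention_heads == 16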
| 265 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 41 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Evaluate the Gamma function, Gamma(num) = integral from 0 to inf of x^(num-1) * e^(-x) dx."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
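    # A quick numeric check (gamma as defined above): Gamma(n) = (n - 1)! for
    # positive integers, so gamma(5) should be very close to 24.
    print(f"gamma(5) = {gamma(5):.6f}")  # ~24.000000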
| 21 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_filelock_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
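# A minimal usage sketch (the lock path is illustrative) of the FileLock API
# exercised above: `acquire()` doubles as a context manager and raises
# `Timeout` when the lock is already held past the given timeout.
def example_usage(tmp_path):
    lock = FileLock(str(tmp_path / "example.lock"))
    with lock.acquire(timeout=1):
        pass  # critical section: only one holder at a time gets here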
| 41 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
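# A hedged usage sketch (the spiece.model path is hypothetical and requires a
# local SentencePiece model): sentinel tokens occupy the top of the vocabulary,
# so <extra_id_0> maps to vocab_size - 1.
#
#     tok = T5Tokenizer("spiece.model")
#     tok.convert_tokens_to_ids(["<extra_id_0>"])  # [tok.vocab_size - 1]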
| 617 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
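# With the lazy module installed in sys.modules, importing a heavy symbol such
# as GPTBigCodeModel defers loading `modeling_gpt_bigcode` (and torch) until
# the attribute is first accessed, keeping the top-level import cheap.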
| 41 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps CLIP text tokenization and image preprocessing so the image branch
    stays differentiable (no forced conversion to PIL images).
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(2_24)
        self.center_crop = torchvision.transforms.CenterCrop(2_24)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , snake_case__=None , snake_case__=None , snake_case__=5 , snake_case__=True ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = []
if output_path is None:
UpperCAmelCase = """./animation.gif"""
if input_path is None:
UpperCAmelCase = self.save_path
UpperCAmelCase = sorted(glob(input_path + """/*""" ) )
if not len(lowercase__ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(lowercase__ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
UpperCAmelCase = total_duration / len(lowercase__ )
UpperCAmelCase = [frame_duration] * len(lowercase__ )
if extend_frames:
UpperCAmelCase = 1.5
UpperCAmelCase = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(lowercase__ ) )
imageio.mimsave(lowercase__ , lowercase__ , duration=lowercase__ )
print(f'''gif saved to {output_path}''' )
def UpperCamelCase_ ( self , snake_case__=None , snake_case__=None ) -> str:
"""simple docstring"""
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
UpperCAmelCase = preprocess(Image.open(lowercase__ ) , target_image_size=2_56 ).to(self.device )
UpperCAmelCase = preprocess_vqgan(lowercase__ )
UpperCAmelCase , *UpperCAmelCase = self.vqgan.encode(lowercase__ )
return z
def UpperCamelCase_ ( self , snake_case__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.latent.detach().requires_grad_()
UpperCAmelCase = base_latent + transform_vector
if self.quantize:
UpperCAmelCase , *UpperCAmelCase = self.vqgan.quantize(lowercase__ )
else:
UpperCAmelCase = trans_latent
return self.vqgan.decode(lowercase__ )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__=None ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.clip_preprocessor(text=lowercase__ , images=lowercase__ , return_tensors="""pt""" , padding=lowercase__ )
UpperCAmelCase = self.clip(**lowercase__ )
UpperCAmelCase = clip_outputs.logits_per_image
if weights is not None:
UpperCAmelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
UpperCAmelCase = self._get_clip_similarity(pos_prompts["""prompts"""] , lowercase__ , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
UpperCAmelCase = self._get_clip_similarity(neg_prompts["""prompts"""] , lowercase__ , weights=neg_prompts["""weights"""] )
else:
UpperCAmelCase = torch.tensor([1] , device=self.device )
UpperCAmelCase = -torch.log(lowercase__ ) + torch.log(lowercase__ )
return loss
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Any:
"""simple docstring"""
UpperCAmelCase = torch.randn_like(self.latent , requires_grad=lowercase__ , device=self.device )
UpperCAmelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCAmelCase = self._add_vector(lowercase__ )
UpperCAmelCase = loop_post_process(lowercase__ )
UpperCAmelCase = self._get_CLIP_loss(lowercase__ , lowercase__ , lowercase__ )
print("""CLIP loss""" , lowercase__ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=lowercase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
"""simple docstring"""
wandb.init(reinit=lowercase__ , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
UpperCAmelCase = Image.open(lowercase__ )
UpperCAmelCase = image.resize((2_56, 2_56) )
wandb.log("""Original Image""" , wandb.Image(lowercase__ ) )
def UpperCamelCase_ ( self , snake_case__ ) -> str:
"""simple docstring"""
if not prompts:
return []
UpperCAmelCase = []
UpperCAmelCase = []
if isinstance(lowercase__ , lowercase__ ):
UpperCAmelCase = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(lowercase__ , (tuple, list) ):
UpperCAmelCase = prompt[0]
UpperCAmelCase = float(prompt[1] )
elif ":" in prompt:
UpperCAmelCase , UpperCAmelCase = prompt.split(""":""" )
UpperCAmelCase = float(lowercase__ )
else:
UpperCAmelCase = prompt
UpperCAmelCase = 1.0
processed_prompts.append(lowercase__ )
weights.append(lowercase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowercase__ , device=self.device ),
}
def UpperCamelCase_ ( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=True , snake_case__=None , ) -> Tuple:
"""simple docstring"""
if image_path:
UpperCAmelCase = self._get_latent(lowercase__ )
else:
UpperCAmelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowercase__ , lowercase__ , lowercase__ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCAmelCase = self.process_prompts(lowercase__ )
UpperCAmelCase = self.process_prompts(lowercase__ )
if save_final and save_path is None:
UpperCAmelCase = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(lowercase__ ):
os.makedirs(lowercase__ )
else:
UpperCAmelCase = save_path + """_""" + get_timestamp()
os.makedirs(lowercase__ )
UpperCAmelCase = save_path
UpperCAmelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(lowercase__ ) )
UpperCAmelCase = loop_post_process(lowercase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase__ , lowercase__ , lowercase__ ) ):
if show_intermediate:
show_pil(lowercase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(lowercase__ )} )
if show_final:
show_pil(lowercase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
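# A hedged end-to-end sketch (checkpoint/config paths, prompts, and the
# `generate` method name are assumptions about the surrounding research code):
#
#     editor = VQGAN_CLIP(iterations=20, lr=0.05, vqgan_config="vqgan.yaml", vqgan_checkpoint="vqgan.ckpt")
#     editor.generate(pos_prompts="a smiling face:1.0", image_path="face.png", save_intermediate=True)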
| 673 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _A ( A__ , A__="" , A__=None , A__=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(A__ ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a key function so that comparisons ignore casing and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def _A ( A__ , A__=True ):
"""simple docstring"""
with open(A__ , '''r''' ) as f:
__lowercase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__lowercase = split_code_in_indented_blocks(
A__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(A__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__lowercase = main_blocks[block_idx]
__lowercase = block.split('''\n''' )
# Get to the start of the imports.
__lowercase = 0
while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__lowercase = len(A__ )
else:
line_idx += 1
if line_idx >= len(A__ ):
continue
# Ignore beginning and last line: they don't contain anything.
__lowercase = '''\n'''.join(block_lines[line_idx:-1] )
__lowercase = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
__lowercase = split_code_in_indented_blocks(A__ , indent_level=A__ )
# We have two categories of import key: list or _import_structure[key].append/extend
__lowercase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__lowercase = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__lowercase = [(i, key) for i, key in enumerate(A__ ) if key is not None]
__lowercase = [x[0] for x in sorted(A__ , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__lowercase = 0
__lowercase = []
for i in range(len(A__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(A__ )
count += 1
# And we put our main block back together with its first and last line.
__lowercase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(A__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(A__ , '''w''' ) as f:
f.write('''\n'''.join(A__ ) )
def _A ( A__=True ):
"""simple docstring"""
failures = []
# NOTE: `A__` below stands in for the package root path defined earlier in the original script.
for root, _, files in os.walk(A__ ):
    if "__init__.py" in files:
        result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=A__ )
        if result:
            # Collect every failing init, instead of keeping only the last one seen.
            failures.append(os.path.join(root , '''__init__.py''' ) )
if len(failures ) > 0:
    raise ValueError(F"Would overwrite {len(failures )} files, run `make style`." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
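# Typical invocation (the script/file name is assumed, not given in this source):
#   python custom_init_isort.py               # rewrite offending __init__.py files in place
#   python custom_init_isort.py --check_only  # only report, raising if a rewrite is needed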
| 41 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCamelCase ( lowerCamelCase__,lowerCamelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[Any] , _lowerCamelCase : int = 1_2_8 , _lowerCamelCase : int = 2_5_6 , _lowerCamelCase : float = 2_0_0_0.0 , _lowerCamelCase : int = 7_6_8 , _lowerCamelCase : int = 1_2 , _lowerCamelCase : int = 1_2 , _lowerCamelCase : int = 6_4 , _lowerCamelCase : int = 2_0_4_8 , _lowerCamelCase : float = 0.1 , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : Any = nn.Sequential(
nn.Linear(lowercase__ , d_model * 4 , bias=lowercase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowercase__ ) , nn.SiLU() , )
__lowerCamelCase : Dict = nn.Embedding(lowercase__ , lowercase__ )
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
__lowerCamelCase : Optional[Any] = nn.Dropout(p=lowercase__ )
__lowerCamelCase : Optional[int] = nn.ModuleList()
for lyr_num in range(lowercase__ ):
# FiLM conditional T5 decoder
__lowerCamelCase : Any = DecoderLayer(d_model=lowercase__ , d_kv=lowercase__ , num_heads=lowercase__ , d_ff=lowercase__ , dropout_rate=lowercase__ )
self.decoders.append(lowercase__ )
__lowerCamelCase : int = TaLayerNorm(lowercase__ )
__lowerCamelCase : Optional[int] = nn.Dropout(p=lowercase__ )
__lowerCamelCase : Optional[Any] = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
def _snake_case ( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def _snake_case ( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__lowerCamelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__lowerCamelCase : Tuple = self.conditioning_emb(lowercase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__lowerCamelCase : Optional[Any] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__lowerCamelCase : List[Any] = torch.broadcast_to(
torch.arange(lowercase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__lowerCamelCase : List[str] = self.position_encoding(lowercase__ )
__lowerCamelCase : Any = self.continuous_inputs_projection(lowercase__ )
inputs += position_encodings
__lowerCamelCase : List[Any] = self.dropout(lowercase__ )
# decoder: No padding present.
__lowerCamelCase : Optional[Any] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__lowerCamelCase : Union[str, Any] = [(x, self.encoder_decoder_mask(lowercase__ , lowercase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__lowerCamelCase : Optional[int] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__lowerCamelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__lowerCamelCase : int = lyr(
lowercase__ , conditioning_emb=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )[0]
__lowerCamelCase : Dict = self.decoder_norm(lowercase__ )
__lowerCamelCase : List[str] = self.post_dropout(lowercase__ )
__lowerCamelCase : int = self.spec_out(lowercase__ )
return spec_out
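# Added illustration (not in the original file): the encoder_decoder_mask helper above
# builds an outer product of the query and key padding masks, assuming 0/1 float masks
# of shape (batch, length).
def _demo_encoder_decoder_mask() -> None:
    q = torch.ones(2, 5)  # (batch, target_len)
    k = torch.ones(2, 7)  # (batch, source_len)
    mask = torch.mul(q.unsqueeze(-1), k.unsqueeze(-2)).unsqueeze(-3)
    assert mask.shape == (2, 1, 5, 7)  # the singleton dim broadcasts over attention heads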
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : List[Any]=1E-6 ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowercase__ , d_kv=lowercase__ , num_heads=lowercase__ , dropout_rate=lowercase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowercase__ , d_kv=lowercase__ , num_heads=lowercase__ , dropout_rate=lowercase__ , layer_norm_epsilon=lowercase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowercase__ , d_ff=lowercase__ , dropout_rate=lowercase__ , layer_norm_epsilon=lowercase__ ) )
def _snake_case ( self : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Tuple=None , ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = self.layer[0](
lowercase__ , conditioning_emb=lowercase__ , attention_mask=lowercase__ , )
if encoder_hidden_states is not None:
__lowerCamelCase : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
__lowerCamelCase : List[Any] = self.layer[1](
lowercase__ , key_value_states=lowercase__ , attention_mask=lowercase__ , )
# Apply Film Conditional Feed Forward layer
__lowerCamelCase : Union[str, Any] = self.layer[-1](lowercase__ , lowercase__ )
return (hidden_states,)
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : Optional[int] = TaLayerNorm(lowercase__ )
__lowerCamelCase : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase__ )
__lowerCamelCase : List[Any] = Attention(query_dim=lowercase__ , heads=lowercase__ , dim_head=lowercase__ , out_bias=lowercase__ , scale_qk=lowercase__ )
__lowerCamelCase : List[Any] = nn.Dropout(lowercase__ )
def _snake_case ( self : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int=None , _lowerCamelCase : List[Any]=None , ):
'''simple docstring'''
__lowerCamelCase : Tuple = self.layer_norm(lowercase__ )
if conditioning_emb is not None:
__lowerCamelCase : Optional[Any] = self.FiLMLayer(lowercase__ , lowercase__ )
# Self-attention block
__lowerCamelCase : int = self.attention(lowercase__ )
__lowerCamelCase : str = hidden_states + self.dropout(lowercase__ )
return hidden_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : str = Attention(query_dim=lowercase__ , heads=lowercase__ , dim_head=lowercase__ , out_bias=lowercase__ , scale_qk=lowercase__ )
__lowerCamelCase : Any = TaLayerNorm(lowercase__ , eps=lowercase__ )
__lowerCamelCase : Optional[int] = nn.Dropout(lowercase__ )
def _snake_case ( self : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Dict=None , _lowerCamelCase : Any=None , ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = self.layer_norm(lowercase__ )
__lowerCamelCase : str = self.attention(
lowercase__ , encoder_hidden_states=lowercase__ , attention_mask=attention_mask.squeeze(1 ) , )
__lowerCamelCase : Union[str, Any] = hidden_states + self.dropout(lowercase__ )
return layer_output
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : Dict = TaDenseGatedActDense(d_model=lowercase__ , d_ff=lowercase__ , dropout_rate=lowercase__ )
__lowerCamelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase__ )
__lowerCamelCase : Tuple = TaLayerNorm(lowercase__ , eps=lowercase__ )
__lowerCamelCase : List[str] = nn.Dropout(lowercase__ )
def _snake_case ( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple=None ):
'''simple docstring'''
__lowerCamelCase : List[Any] = self.layer_norm(lowercase__ )
if conditioning_emb is not None:
__lowerCamelCase : Tuple = self.film(lowercase__ , lowercase__ )
__lowerCamelCase : Tuple = self.DenseReluDense(lowercase__ )
__lowerCamelCase : Optional[int] = hidden_states + self.dropout(lowercase__ )
return hidden_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : Optional[int] = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )  # wi_a: first input projection
__lowerCamelCase : Tuple = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )  # wi_b: second input projection of the gated pair
__lowerCamelCase : Tuple = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )  # wo: output projection
__lowerCamelCase : int = nn.Dropout(lowercase__ )
__lowerCamelCase : Any = NewGELUActivation()
def _snake_case ( self : List[str] , _lowerCamelCase : List[str] ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = self.act(self.wi_a(lowercase__ ) )
# The gated feed-forward needs a second, separate input projection here, not wi_a again.
__lowerCamelCase : Optional[Any] = self.wi_b(lowercase__ )
__lowerCamelCase : Any = hidden_gelu * hidden_linear
__lowerCamelCase : str = self.dropout(lowercase__ )
__lowerCamelCase : List[str] = self.wo(lowercase__ )
return hidden_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : str=1E-6 ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : List[Any] = nn.Parameter(torch.ones(lowercase__ ) )
__lowerCamelCase : int = eps
def _snake_case ( self : Optional[int] , _lowerCamelCase : List[str] ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
__lowerCamelCase : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert back into half precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
__lowerCamelCase : List[Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
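# Added illustration: unlike nn.LayerNorm, the T5-style norm above rescales by the
# root mean square only -- no mean subtraction and no bias term.
def _demo_rms_norm() -> None:
    x = torch.randn(2, 4, 8)
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    normed = x * torch.rsqrt(variance + 1e-6)
    assert normed.shape == x.shape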
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def _snake_case ( self : List[Any] , _lowerCamelCase : torch.Tensor ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(lowercase__ , 3.0 )) ))
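# Sanity check (added): the tanh formula above matches PyTorch's own tanh
# approximation of GELU to within float tolerance (requires torch >= 1.12).
def _demo_new_gelu() -> None:
    x = torch.linspace(-3, 3, 101)
    ours = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044_715 * torch.pow(x, 3.0))))
    ref = torch.nn.functional.gelu(x, approximate="tanh")
    assert torch.allclose(ours, ref, atol=1e-5)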
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : Optional[Any] = nn.Linear(lowercase__ , out_features * 2 , bias=lowercase__ )
def _snake_case ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Tuple ):
'''simple docstring'''
__lowerCamelCase : Dict = self.scale_bias(lowercase__ )
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = torch.chunk(lowercase__ , 2 , -1 )
__lowerCamelCase : Union[str, Any] = x * (1 + scale) + shift
return x
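# Added illustration: FiLM applies a feature-wise affine transform predicted from the
# conditioning embedding -- x * (1 + scale) + shift -- with scale and shift chunked
# from a single linear projection, as in the layer above.
def _demo_film() -> None:
    x = torch.randn(2, 10, 8)
    scale_shift = torch.randn(2, 1, 16)  # 2 * out_features, broadcast over the time axis
    scale, shift = torch.chunk(scale_shift, 2, -1)
    out = x * (1 + scale) + shift
    assert out.shape == x.shape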
| 519 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
__lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,)
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
__lowercase = CLIPTextModel(lowercase__ )
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ):
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = TextToVideoSDPipeline(**lowercase__ )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ )
__lowercase = '''np'''
__lowercase = sd_pipe(**lowercase__ ).frames
__lowercase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
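# Note (added): the tests in this class are gated by the @slow decorator and only run
# when slow tests are enabled, e.g. `RUN_SLOW=1 python -m pytest <path-to-this-test-file>`
# (the exact invocation is an assumption, not part of this file).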
| 41 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Dict = '▁'
__snake_case : List[str] = {'vocab_file': 'sentencepiece.bpe.model'}
__snake_case : Dict = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
__snake_case : str = {
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
__snake_case : List[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] =VOCAB_FILES_NAMES
_lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : str =PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Optional[int] =['input_ids', 'attention_mask']
_lowerCamelCase : List[int] =[]
_lowerCamelCase : List[int] =[]
def __init__( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any]="<s>" , _lowerCamelCase : str="</s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : List[Any]="<s>" , _lowerCamelCase : List[str]="<unk>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : int="<mask>" , _lowerCamelCase : Tuple=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , _lowerCamelCase : Any=None , _lowerCamelCase : Any=False , **_lowerCamelCase : Optional[int] , ):
# Mask token behave like a normal word, i.e. include the space before it
A__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
A__ = legacy_behaviour
super().__init__(
bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , tokenizer_file=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowercase__ , **lowercase__ , )
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
A__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
A__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A__ = 1
A__ = len(self.sp_model )
A__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase__ )
}
A__ = {v: k for k, v in self.lang_code_to_id.items()}
A__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
A__ = src_lang if src_lang is not None else '''eng_Latn'''
A__ = self.lang_code_to_id[self._src_lang]
A__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[int] ):
A__ = self.__dict__.copy()
A__ = None
A__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , _lowerCamelCase : List[str] ):
A__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def A__ ( self : Tuple ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def A__ ( self : Optional[Any] ):
return self._src_lang
@src_lang.setter
def A__ ( self : Tuple , _lowerCamelCase : str ):
A__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
    prefix_ones = [1] * len(self.prefix_tokens )
    suffix_ones = [1] * len(self.suffix_tokens )
    if token_ids_1 is None:
        return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
    return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def A__ ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : Optional[str] , _lowerCamelCase : Optional[str] , **_lowerCamelCase : str ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
A__ = src_lang
A__ = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
A__ = self.convert_tokens_to_ids(lowercase__ )
A__ = tgt_lang_id
return inputs
def A__ ( self : Any ):
A__ = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A__ ( self : List[Any] , _lowerCamelCase : str ):
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def A__ ( self : Optional[Any] , _lowerCamelCase : int ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A__ = self.sp_model.PieceToId(lowercase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self : str , _lowerCamelCase : Tuple ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A__ ( self : Optional[int] , _lowerCamelCase : Tuple ):
A__ = ''''''.join(lowercase__ ).replace(lowercase__ , ''' ''' ).strip()
return out_string
def A__ ( self : int , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
A__ = os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , '''wb''' ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def A__ ( self : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str = "eng_Latn" , _lowerCamelCase : Optional[List[str]] = None , _lowerCamelCase : str = "fra_Latn" , **_lowerCamelCase : Union[str, Any] , ):
A__ = src_lang
A__ = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def A__ ( self : int ):
return self.set_src_lang_special_tokens(self.src_lang )
def A__ ( self : Optional[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A__ ( self : Any , _lowerCamelCase : Optional[Any] ):
A__ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
A__ = []
A__ = [self.eos_token_id, self.cur_lang_code]
else:
A__ = [self.cur_lang_code]
A__ = [self.eos_token_id]
def A__ ( self : List[Any] , _lowerCamelCase : str ):
A__ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
A__ = []
A__ = [self.eos_token_id, self.cur_lang_code]
else:
A__ = [self.cur_lang_code]
A__ = [self.eos_token_id]
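# Hypothetical usage sketch (added; requires downloading a real checkpoint, and the
# public class name is assumed to be NllbTokenizer rather than the placeholder above):
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")
#   # With the default (non-legacy) behaviour, input_ids start with the source
#   # language code and end with </s>, per set_src_lang_special_tokens above.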
| 571 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
args = parser.parse_args()
model = convert_fairseq_mbart_checkpoint_from_disk(
    args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
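# Example invocation (script and path names assumed, not taken from this file):
#   python convert_mbart_original_checkpoint_to_pytorch.py model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned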
| 41 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    '''simple docstring'''

    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    # Builds the sample tree used by main():  1 -> (2 -> (4, 5), 3)
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    # Height counts nodes on the longest root-to-leaf path (0 for an empty tree).
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> list[int]:
    # Breadth-first traversal using a FIFO queue.
    output: list[int] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[int]:
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[int]:
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> list[list[int]]:
    # Alternate direction level by level, starting left-to-right.
    if root is None:
        return []
    output: list[list[int]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
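# Expected results for the sample tree from make_tree() (added for reference):
#   preorder(root)    -> [1, 2, 4, 5, 3]
#   inorder(root)     -> [4, 2, 5, 1, 3]
#   postorder(root)   -> [4, 5, 2, 3, 1]
#   level_order(root) -> [1, 2, 3, 4, 5]
#   zigzag(root)      -> [[1], [3, 2], [4, 5]]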
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 234 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """simple docstring"""
    # The data file is assumed to live next to this script.
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
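def _demo_log_trick() -> None:
    # Added illustration: log10 is monotonic and log10(a**x) == x * log10(a), so
    # comparing x * log10(a) ranks the enormous powers a**x without computing them.
    assert (2 * log10(10) > 3 * log10(4)) == (10**2 > 4**3)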
if __name__ == "__main__":
print(solution())
| 41 | 0 |